-rw-r--r--Documentation/CodingStyle100
-rw-r--r--Documentation/DocBook/kernel-api.tmpl13
-rw-r--r--Documentation/RCU/whatisRCU.txt1
-rw-r--r--Documentation/SubmitChecklist57
-rw-r--r--Documentation/devices.txt135
-rw-r--r--Documentation/feature-removal-schedule.txt15
-rw-r--r--Documentation/filesystems/Locking9
-rw-r--r--Documentation/filesystems/porting7
-rw-r--r--Documentation/filesystems/vfs.txt6
-rw-r--r--Documentation/ia64/aliasing.txt208
-rw-r--r--Documentation/ioctl-number.txt2
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/networking/tuntap.txt11
-rw-r--r--Documentation/power/swsusp.txt45
-rw-r--r--Documentation/power/video.txt4
-rw-r--r--Documentation/sparse.txt36
-rw-r--r--Documentation/sysctl/vm.txt13
-rw-r--r--Documentation/vm/page_migration114
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c12
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/alpha/kernel/signal.c2
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/dmabounce.c67
-rw-r--r--arch/arm/common/uengine.c58
-rw-r--r--arch/arm/configs/lpd270_defconfig963
-rw-r--r--arch/arm/kernel/entry-armv.S24
-rw-r--r--arch/arm/kernel/irq.c4
-rw-r--r--arch/arm/kernel/iwmmxt.S2
-rw-r--r--arch/arm/kernel/process.c24
-rw-r--r--arch/arm/kernel/signal.c21
-rw-r--r--arch/arm/mach-ep93xx/core.c5
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c24
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c23
-rw-r--r--arch/arm/mach-imx/dma.c65
-rw-r--r--arch/arm/mach-ixp2000/core.c3
-rw-r--r--arch/arm/mach-ixp23xx/core.c5
-rw-r--r--arch/arm/mach-ixp23xx/espresso.c22
-rw-r--r--arch/arm/mach-ixp23xx/ixdp2351.c22
-rw-r--r--arch/arm/mach-ixp23xx/roadrunner.c22
-rw-r--r--arch/arm/mach-ixp4xx/common.c2
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c41
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c48
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c11
-rw-r--r--arch/arm/mach-pnx4008/clock.c129
-rw-r--r--arch/arm/mach-pnx4008/serial.c2
-rw-r--r--arch/arm/mach-s3c2410/Kconfig8
-rw-r--r--arch/arm/mach-s3c2410/Makefile4
-rw-r--r--arch/arm/mach-s3c2410/clock.c228
-rw-r--r--arch/arm/mach-s3c2410/clock.h11
-rw-r--r--arch/arm/mach-s3c2410/cpu.h1
-rw-r--r--arch/arm/mach-s3c2410/s3c2410-clock.c263
-rw-r--r--arch/arm/mach-s3c2410/s3c2410.c25
-rw-r--r--arch/arm/mach-s3c2410/s3c2410.h2
-rw-r--r--arch/arm/mach-s3c2410/s3c2440-clock.c4
-rw-r--r--arch/arm/mach-s3c2410/s3c2442-clock.c2
-rw-r--r--arch/arm/mach-s3c2410/s3c244x.c2
-rw-r--r--arch/arm/nwfpe/fpmodule.c25
-rw-r--r--arch/arm/plat-omap/timer32k.c3
-rw-r--r--arch/arm/vfp/Makefile5
-rw-r--r--arch/arm/vfp/vfphw.S4
-rw-r--r--arch/arm/vfp/vfpmodule.c71
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/frv_ksyms.c18
-rw-r--r--arch/frv/kernel/irq-routing.c8
-rw-r--r--arch/frv/kernel/irq.c6
-rw-r--r--arch/frv/kernel/pm.c40
-rw-r--r--arch/frv/kernel/process.c2
-rw-r--r--arch/frv/kernel/setup.c2
-rw-r--r--arch/frv/kernel/signal.c22
-rw-r--r--arch/frv/kernel/sys_frv.c2
-rw-r--r--arch/frv/kernel/sysctl.c4
-rw-r--r--arch/frv/kernel/uaccess.c6
-rw-r--r--arch/frv/mb93090-mb00/pci-irq.c10
-rw-r--r--arch/frv/mm/kmap.c6
-rw-r--r--arch/h8300/kernel/signal.c2
-rw-r--r--arch/i386/Kconfig7
-rw-r--r--arch/i386/kernel/acpi/boot.c8
-rw-r--r--arch/i386/kernel/acpi/processor.c2
-rw-r--r--arch/i386/kernel/acpi/sleep.c19
-rw-r--r--arch/i386/kernel/acpi/wakeup.S11
-rw-r--r--arch/i386/kernel/apic.c4
-rw-r--r--arch/i386/kernel/apm.c39
-rw-r--r--arch/i386/kernel/cpu/common.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c291
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c30
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.h4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c266
-rw-r--r--arch/i386/kernel/cpu/cyrix.c9
-rw-r--r--arch/i386/kernel/cpu/proc.c2
-rw-r--r--arch/i386/kernel/i387.c2
-rw-r--r--arch/i386/kernel/i8259.c4
-rw-r--r--arch/i386/kernel/io_apic.c31
-rw-r--r--arch/i386/kernel/irq.c4
-rw-r--r--arch/i386/kernel/kprobes.c2
-rw-r--r--arch/i386/kernel/microcode.c73
-rw-r--r--arch/i386/kernel/setup.c106
-rw-r--r--arch/i386/kernel/srat.c19
-rw-r--r--arch/i386/kernel/syscall_table.S1
-rw-r--r--arch/i386/kernel/traps.c11
-rw-r--r--arch/i386/lib/usercopy.c137
-rw-r--r--arch/i386/mm/fault.c21
-rw-r--r--arch/i386/mm/init.c3
-rw-r--r--arch/i386/mm/pageattr.c8
-rw-r--r--arch/i386/pci/irq.c15
-rw-r--r--arch/i386/power/cpu.c2
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/hp/common/sba_iommu.c4
-rw-r--r--arch/ia64/kernel/acpi.c32
-rw-r--r--arch/ia64/kernel/asm-offsets.c16
-rw-r--r--arch/ia64/kernel/efi.c156
-rw-r--r--arch/ia64/kernel/efi_stub.S2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/entry.h1
-rw-r--r--arch/ia64/kernel/mca_asm.S28
-rw-r--r--arch/ia64/kernel/perfmon.c10
-rw-r--r--arch/ia64/kernel/sal.c6
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/kernel/uncached.c200
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/ia64/mm/ioremap.c27
-rw-r--r--arch/ia64/pci/pci.c19
-rw-r--r--arch/ia64/sn/kernel/setup.c4
-rw-r--r--arch/ia64/sn/kernel/sn2/cache.c15
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c50
-rw-r--r--arch/ia64/sn/pci/tioce_provider.c4
-rw-r--r--arch/m68k/kernel/entry.S4
-rw-r--r--arch/m68k/kernel/ints.c6
-rw-r--r--arch/m68k/kernel/signal.c2
-rw-r--r--arch/m68k/kernel/traps.c8
-rw-r--r--arch/m68k/lib/Makefile4
-rw-r--r--arch/m68k/lib/uaccess.c222
-rw-r--r--arch/m68k/mac/config.c13
-rw-r--r--arch/m68k/mac/macints.c1
-rw-r--r--arch/m68k/mac/via.c17
-rw-r--r--arch/m68k/mm/motorola.c12
-rw-r--r--arch/m68k/mm/sun3mmu.c5
-rw-r--r--arch/m68knommu/kernel/signal.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/sysirix.c12
-rw-r--r--arch/parisc/hpux/sys_hpux.c10
-rw-r--r--arch/parisc/kernel/signal.c2
-rw-r--r--arch/powerpc/Kconfig53
-rw-r--r--arch/powerpc/Kconfig.debug13
-rw-r--r--arch/powerpc/Makefile1
-rw-r--r--arch/powerpc/boot/main.c27
-rw-r--r--arch/powerpc/boot/prom.h7
-rw-r--r--arch/powerpc/configs/cell_defconfig34
-rw-r--r--arch/powerpc/configs/mpc85xx_cds_defconfig846
-rw-r--r--arch/powerpc/configs/mpc8641_hpcn_defconfig921
-rw-r--r--arch/powerpc/configs/pmac32_defconfig204
-rw-r--r--arch/powerpc/configs/pseries_defconfig75
-rw-r--r--arch/powerpc/kernel/align.c189
-rw-r--r--arch/powerpc/kernel/asm-offsets.c5
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S2
-rw-r--r--arch/powerpc/kernel/cpu_setup_power4.S17
-rw-r--r--arch/powerpc/kernel/cputable.c137
-rw-r--r--arch/powerpc/kernel/crash.c13
-rw-r--r--arch/powerpc/kernel/crash_dump.c11
-rw-r--r--arch/powerpc/kernel/entry_64.S2
-rw-r--r--arch/powerpc/kernel/fpu.S6
-rw-r--r--arch/powerpc/kernel/head_32.S14
-rw-r--r--arch/powerpc/kernel/head_64.S29
-rw-r--r--arch/powerpc/kernel/iomap.c2
-rw-r--r--arch/powerpc/kernel/iommu.c23
-rw-r--r--arch/powerpc/kernel/irq.c29
-rw-r--r--arch/powerpc/kernel/lparcfg.c4
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c99
-rw-r--r--arch/powerpc/kernel/misc_32.S2
-rw-r--r--arch/powerpc/kernel/misc_64.S5
-rw-r--r--arch/powerpc/kernel/nvram_64.c2
-rw-r--r--arch/powerpc/kernel/pci_32.c7
-rw-r--r--arch/powerpc/kernel/pci_64.c62
-rw-r--r--arch/powerpc/kernel/pci_direct_iommu.c18
-rw-r--r--arch/powerpc/kernel/pci_dn.c6
-rw-r--r--arch/powerpc/kernel/pci_iommu.c41
-rw-r--r--arch/powerpc/kernel/proc_ppc64.c2
-rw-r--r--arch/powerpc/kernel/process.c55
-rw-r--r--arch/powerpc/kernel/prom.c145
-rw-r--r--arch/powerpc/kernel/prom_init.c120
-rw-r--r--arch/powerpc/kernel/prom_parse.c25
-rw-r--r--arch/powerpc/kernel/ptrace.c2
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c30
-rw-r--r--arch/powerpc/kernel/rtas.c108
-rw-r--r--arch/powerpc/kernel/rtas_flash.c25
-rw-r--r--arch/powerpc/kernel/rtas_pci.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c20
-rw-r--r--arch/powerpc/kernel/setup.h3
-rw-r--r--arch/powerpc/kernel/setup_32.c18
-rw-r--r--arch/powerpc/kernel/setup_64.c31
-rw-r--r--arch/powerpc/kernel/signal_32.c23
-rw-r--r--arch/powerpc/kernel/signal_64.c16
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/systbl.S311
-rw-r--r--arch/powerpc/kernel/time.c67
-rw-r--r--arch/powerpc/kernel/traps.c8
-rw-r--r--arch/powerpc/kernel/udbg.c7
-rw-r--r--arch/powerpc/kernel/vdso.c57
-rw-r--r--arch/powerpc/kernel/vector.S4
-rw-r--r--arch/powerpc/kernel/vio.c344
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S5
-rw-r--r--arch/powerpc/lib/Makefile5
-rw-r--r--arch/powerpc/lib/bitops.c150
-rw-r--r--arch/powerpc/mm/hash_low_32.S34
-rw-r--r--arch/powerpc/mm/hash_low_64.S31
-rw-r--r--arch/powerpc/mm/hash_native_64.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c84
-rw-r--r--arch/powerpc/mm/lmb.c43
-rw-r--r--arch/powerpc/mm/mem.c6
-rw-r--r--arch/powerpc/mm/mmu_context_32.c2
-rw-r--r--arch/powerpc/mm/mmu_context_64.c3
-rw-r--r--arch/powerpc/mm/numa.c8
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c16
-rw-r--r--arch/powerpc/mm/slb.c32
-rw-r--r--arch/powerpc/mm/slb_low.S17
-rw-r--r--arch/powerpc/mm/stab.c4
-rw-r--r--arch/powerpc/mm/tlb_32.c6
-rw-r--r--arch/powerpc/mm/tlb_64.c5
-rw-r--r--arch/powerpc/oprofile/Kconfig1
-rw-r--r--arch/powerpc/oprofile/Makefile4
-rw-r--r--arch/powerpc/oprofile/common.c6
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c37
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig9
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c359
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.h43
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig36
-rw-r--r--arch/powerpc/platforms/86xx/Makefile10
-rw-r--r--arch/powerpc/platforms/86xx/mpc8641_hpcn.h54
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx.h28
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c326
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_pcie.c173
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c117
-rw-r--r--arch/powerpc/platforms/86xx/pci.c325
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/Kconfig9
-rw-r--r--arch/powerpc/platforms/cell/Makefile23
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c128
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.h129
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c40
-rw-r--r--arch/powerpc/platforms/cell/iommu.c18
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c104
-rw-r--r--arch/powerpc/platforms/cell/pervasive.h37
-rw-r--r--arch/powerpc/platforms/cell/ras.c112
-rw-r--r--arch/powerpc/platforms/cell/ras.h9
-rw-r--r--arch/powerpc/platforms/cell/setup.c14
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c179
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c314
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1.c133
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.c159
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile14
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c12
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c67
-rw-r--r--arch/powerpc/platforms/cell/spufs/hw_ops.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c36
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped1122
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped922
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c48
-rw-r--r--arch/powerpc/platforms/iseries/Makefile6
-rw-r--r--arch/powerpc/platforms/iseries/call_pci.h19
-rw-r--r--arch/powerpc/platforms/iseries/dt.c615
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c46
-rw-r--r--arch/powerpc/platforms/iseries/irq.c7
-rw-r--r--arch/powerpc/platforms/iseries/irq.h2
-rw-r--r--arch/powerpc/platforms/iseries/mf.c9
-rw-r--r--arch/powerpc/platforms/iseries/pci.c347
-rw-r--r--arch/powerpc/platforms/iseries/setup.c271
-rw-r--r--arch/powerpc/platforms/iseries/setup.h2
-rw-r--r--arch/powerpc/platforms/iseries/vio.c131
-rw-r--r--arch/powerpc/platforms/maple/pci.c3
-rw-r--r--arch/powerpc/platforms/maple/setup.c2
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_32.c2
-rw-r--r--arch/powerpc/platforms/powermac/feature.c2
-rw-r--r--arch/powerpc/platforms/powermac/pci.c3
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c14
-rw-r--r--arch/powerpc/platforms/powermac/setup.c2
-rw-r--r--arch/powerpc/platforms/pseries/Makefile5
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c55
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c50
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c182
-rw-r--r--arch/powerpc/platforms/pseries/rtasd.c6
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c6
-rw-r--r--arch/powerpc/platforms/pseries/setup.c6
-rw-r--r--arch/powerpc/platforms/pseries/vio.c274
-rw-r--r--arch/powerpc/platforms/pseries/xics.c25
-rw-r--r--arch/powerpc/sysdev/Makefile4
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c20
-rw-r--r--arch/ppc/Kconfig4
-rw-r--r--arch/ppc/mm/init.c2
-rw-r--r--arch/ppc/mm/mmu_context.c2
-rw-r--r--arch/ppc/mm/tlb.c6
-rw-r--r--arch/ppc/platforms/4xx/Kconfig2
-rw-r--r--arch/ppc/platforms/4xx/cpci405.c139
-rw-r--r--arch/ppc/platforms/4xx/cpci405.h30
-rw-r--r--arch/s390/Kconfig8
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/hypfs/Makefile7
-rw-r--r--arch/s390/hypfs/hypfs.h30
-rw-r--r--arch/s390/hypfs/hypfs_diag.c696
-rw-r--r--arch/s390/hypfs/hypfs_diag.h16
-rw-r--r--arch/s390/hypfs/inode.c491
-rw-r--r--arch/sh64/kernel/signal.c2
-rw-r--r--arch/sparc64/solaris/fs.c4
-rw-r--r--arch/um/Kconfig.debug1
-rw-r--r--arch/um/sys-ppc/misc.S6
-rw-r--r--arch/v850/kernel/signal.c2
-rw-r--r--arch/x86_64/Kconfig1
-rw-r--r--arch/x86_64/ia32/ia32entry.S1
-rw-r--r--arch/x86_64/kernel/acpi/Makefile1
-rw-r--r--arch/x86_64/kernel/acpi/processor.c72
-rw-r--r--arch/x86_64/kernel/acpi/sleep.c7
-rw-r--r--arch/x86_64/kernel/apic.c2
-rw-r--r--arch/x86_64/kernel/i387.c2
-rw-r--r--arch/x86_64/kernel/setup.c95
-rw-r--r--arch/x86_64/mm/srat.c33
-rw-r--r--arch/xtensa/Kconfig4
-rw-r--r--arch/xtensa/kernel/entry.S2
-rw-r--r--arch/xtensa/kernel/signal.c12
-rw-r--r--block/Kconfig.iosched2
-rw-r--r--block/as-iosched.c64
-rw-r--r--block/cfq-iosched.c199
-rw-r--r--block/deadline-iosched.c52
-rw-r--r--block/elevator.c3
-rw-r--r--block/ll_rw_blk.c15
-rw-r--r--drivers/acpi/Kconfig5
-rw-r--r--drivers/acpi/acpi_memhotplug.c12
-rw-r--r--drivers/acpi/asus_acpi.c32
-rw-r--r--drivers/acpi/bus.c22
-rw-r--r--drivers/acpi/dispatcher/dsfield.c13
-rw-r--r--drivers/acpi/dispatcher/dsinit.c6
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c232
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c43
-rw-r--r--drivers/acpi/dispatcher/dsobject.c25
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c63
-rw-r--r--drivers/acpi/dispatcher/dsutils.c25
-rw-r--r--drivers/acpi/dispatcher/dswexec.c26
-rw-r--r--drivers/acpi/dispatcher/dswload.c67
-rw-r--r--drivers/acpi/dispatcher/dswscope.c10
-rw-r--r--drivers/acpi/dispatcher/dswstate.c72
-rw-r--r--drivers/acpi/ec.c72
-rw-r--r--drivers/acpi/events/evevent.c10
-rw-r--r--drivers/acpi/events/evgpe.c85
-rw-r--r--drivers/acpi/events/evgpeblk.c96
-rw-r--r--drivers/acpi/events/evmisc.c41
-rw-r--r--drivers/acpi/events/evregion.c115
-rw-r--r--drivers/acpi/events/evrgnini.c48
-rw-r--r--drivers/acpi/events/evsci.c8
-rw-r--r--drivers/acpi/events/evxface.c49
-rw-r--r--drivers/acpi/events/evxfevnt.c67
-rw-r--r--drivers/acpi/events/evxfregn.c15
-rw-r--r--drivers/acpi/executer/exconfig.c54
-rw-r--r--drivers/acpi/executer/exconvrt.c12
-rw-r--r--drivers/acpi/executer/excreate.c25
-rw-r--r--drivers/acpi/executer/exdump.c36
-rw-r--r--drivers/acpi/executer/exfield.c18
-rw-r--r--drivers/acpi/executer/exfldio.c67
-rw-r--r--drivers/acpi/executer/exmisc.c25
-rw-r--r--drivers/acpi/executer/exmutex.c15
-rw-r--r--drivers/acpi/executer/exnames.c28
-rw-r--r--drivers/acpi/executer/exoparg1.c101
-rw-r--r--drivers/acpi/executer/exoparg2.c89
-rw-r--r--drivers/acpi/executer/exoparg3.c17
-rw-r--r--drivers/acpi/executer/exoparg6.c3
-rw-r--r--drivers/acpi/executer/exprep.c45
-rw-r--r--drivers/acpi/executer/exregion.c40
-rw-r--r--drivers/acpi/executer/exresnte.c17
-rw-r--r--drivers/acpi/executer/exresolv.c77
-rw-r--r--drivers/acpi/executer/exresop.c12
-rw-r--r--drivers/acpi/executer/exstore.c15
-rw-r--r--drivers/acpi/executer/exstoren.c7
-rw-r--r--drivers/acpi/executer/exstorob.c17
-rw-r--r--drivers/acpi/executer/exsystem.c12
-rw-r--r--drivers/acpi/executer/exutils.c17
-rw-r--r--drivers/acpi/fan.c40
-rw-r--r--drivers/acpi/hardware/hwacpi.c6
-rw-r--r--drivers/acpi/hardware/hwgpe.c8
-rw-r--r--drivers/acpi/hardware/hwregs.c146
-rw-r--r--drivers/acpi/hardware/hwsleep.c31
-rw-r--r--drivers/acpi/hardware/hwtimer.c20
-rw-r--r--drivers/acpi/hotkey.c2
-rw-r--r--drivers/acpi/ibm_acpi.c70
-rw-r--r--drivers/acpi/motherboard.c61
-rw-r--r--drivers/acpi/namespace/nsaccess.c46
-rw-r--r--drivers/acpi/namespace/nsalloc.c118
-rw-r--r--drivers/acpi/namespace/nsdump.c15
-rw-r--r--drivers/acpi/namespace/nsdumpdv.c6
-rw-r--r--drivers/acpi/namespace/nseval.c484
-rw-r--r--drivers/acpi/namespace/nsinit.c298
-rw-r--r--drivers/acpi/namespace/nsload.c27
-rw-r--r--drivers/acpi/namespace/nsnames.c14
-rw-r--r--drivers/acpi/namespace/nsobject.c15
-rw-r--r--drivers/acpi/namespace/nsparse.c6
-rw-r--r--drivers/acpi/namespace/nssearch.c148
-rw-r--r--drivers/acpi/namespace/nsutils.c104
-rw-r--r--drivers/acpi/namespace/nswalk.c6
-rw-r--r--drivers/acpi/namespace/nsxfeval.c203
-rw-r--r--drivers/acpi/namespace/nsxfname.c22
-rw-r--r--drivers/acpi/namespace/nsxfobj.c11
-rw-r--r--drivers/acpi/numa.c48
-rw-r--r--drivers/acpi/osl.c159
-rw-r--r--drivers/acpi/parser/psargs.c25
-rw-r--r--drivers/acpi/parser/psloop.c25
-rw-r--r--drivers/acpi/parser/psopcode.c6
-rw-r--r--drivers/acpi/parser/psparse.c35
-rw-r--r--drivers/acpi/parser/psscope.c17
-rw-r--r--drivers/acpi/parser/pstree.c8
-rw-r--r--drivers/acpi/parser/psutils.c5
-rw-r--r--drivers/acpi/parser/pswalk.c5
-rw-r--r--drivers/acpi/parser/psxface.c46
-rw-r--r--drivers/acpi/pci_link.c25
-rw-r--r--drivers/acpi/processor_core.c16
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/processor_perflib.c247
-rw-r--r--drivers/acpi/resources/rscalc.c108
-rw-r--r--drivers/acpi/resources/rscreate.c33
-rw-r--r--drivers/acpi/resources/rsdump.c42
-rw-r--r--drivers/acpi/resources/rsinfo.c1
-rw-r--r--drivers/acpi/resources/rslist.c102
-rw-r--r--drivers/acpi/resources/rsmisc.c12
-rw-r--r--drivers/acpi/resources/rsutils.c155
-rw-r--r--drivers/acpi/resources/rsxface.c395
-rw-r--r--drivers/acpi/scan.c178
-rw-r--r--drivers/acpi/sleep/main.c8
-rw-r--r--drivers/acpi/sleep/wakeup.c3
-rw-r--r--drivers/acpi/system.c6
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/tables/tbconvrt.c46
-rw-r--r--drivers/acpi/tables/tbget.c65
-rw-r--r--drivers/acpi/tables/tbgetall.c11
-rw-r--r--drivers/acpi/tables/tbinstal.c50
-rw-r--r--drivers/acpi/tables/tbrsdt.c46
-rw-r--r--drivers/acpi/tables/tbutils.c149
-rw-r--r--drivers/acpi/tables/tbxface.c42
-rw-r--r--drivers/acpi/tables/tbxfroot.c82
-rw-r--r--drivers/acpi/thermal.c25
-rw-r--r--drivers/acpi/utilities/utalloc.c634
-rw-r--r--drivers/acpi/utilities/utcache.c18
-rw-r--r--drivers/acpi/utilities/utcopy.c41
-rw-r--r--drivers/acpi/utilities/utdebug.c65
-rw-r--r--drivers/acpi/utilities/utdelete.c62
-rw-r--r--drivers/acpi/utilities/uteval.c141
-rw-r--r--drivers/acpi/utilities/utglobal.c62
-rw-r--r--drivers/acpi/utilities/utinit.c26
-rw-r--r--drivers/acpi/utilities/utmath.c8
-rw-r--r--drivers/acpi/utilities/utmisc.c293
-rw-r--r--drivers/acpi/utilities/utmutex.c42
-rw-r--r--drivers/acpi/utilities/utobject.c23
-rw-r--r--drivers/acpi/utilities/utresrc.c256
-rw-r--r--drivers/acpi/utilities/utstate.c36
-rw-r--r--drivers/acpi/utilities/utxface.c44
-rw-r--r--drivers/acpi/utils.c2
-rw-r--r--drivers/acpi/video.c23
-rw-r--r--drivers/base/core.c19
-rw-r--r--drivers/block/Kconfig1
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/viodasd.c2
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/agp/hp-agp.c2
-rw-r--r--drivers/char/hpet.c5
-rw-r--r--drivers/char/hvc_console.c4
-rw-r--r--drivers/char/hvc_rtas.c37
-rw-r--r--drivers/char/hvsi.c2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c25
-rw-r--r--drivers/char/pcmcia/synclink_cs.c54
-rw-r--r--drivers/char/sonypi.c10
-rw-r--r--drivers/char/viotape.c2
-rw-r--r--drivers/connector/cn_proc.c1
-rw-r--r--drivers/connector/connector.c7
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c13
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c13
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/ide/ide-cd.c8
-rw-r--r--drivers/ide/ide-dma.c6
-rw-r--r--drivers/ide/ide-floppy.c12
-rw-r--r--drivers/ide/ide-io.c91
-rw-r--r--drivers/ide/ide-iops.c12
-rw-r--r--drivers/ide/ide-lib.c3
-rw-r--r--drivers/ide/ide-taskfile.c3
-rw-r--r--drivers/ide/ide.c8
-rw-r--r--drivers/ide/legacy/q40ide.c1
-rw-r--r--drivers/ide/pci/sgiioc4.c6
-rw-r--r--drivers/ide/pci/trm290.c3
-rw-r--r--drivers/ieee1394/Kconfig2
-rw-r--r--drivers/infiniband/core/uverbs_main.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c13
-rw-r--r--drivers/input/evdev.c2
-rw-r--r--drivers/input/keyboard/amikbd.c34
-rw-r--r--drivers/isdn/capi/capifs.c6
-rw-r--r--drivers/isdn/i4l/isdn_common.c8
-rw-r--r--drivers/isdn/sc/ioctl.c1
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-ams-delta.c162
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/via-pmu.c4
-rw-r--r--drivers/md/raid6algos.c7
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c7
-rw-r--r--drivers/mmc/at91_mci.c141
-rw-r--r--drivers/mmc/imxmmc.c2
-rw-r--r--drivers/mmc/omap.c6
-rw-r--r--drivers/mmc/sdhci.c8
-rw-r--r--drivers/net/3c527.c3
-rw-r--r--drivers/net/8139cp.c181
-rw-r--r--drivers/net/82596.c3
-rw-r--r--drivers/net/8390.c10
-rw-r--r--drivers/net/Kconfig23
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/a2065.c3
-rw-r--r--drivers/net/ariadne.c3
-rw-r--r--drivers/net/arm/at91_ether.c156
-rw-r--r--drivers/net/arm/at91_ether.h1
-rw-r--r--drivers/net/arm/ether1.c3
-rw-r--r--drivers/net/arm/ether3.c3
-rw-r--r--drivers/net/atarilance.c3
-rw-r--r--drivers/net/b44.c277
-rw-r--r--drivers/net/b44.h7
-rw-r--r--drivers/net/bnx2.c4
-rw-r--r--drivers/net/cassini.c3
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/declance.c3
-rw-r--r--drivers/net/depca.c7
-rw-r--r--drivers/net/e1000/e1000_main.c10
-rw-r--r--drivers/net/eepro.c3
-rw-r--r--drivers/net/eexpress.c3
-rw-r--r--drivers/net/epic100.c7
-rw-r--r--drivers/net/eth16i.c3
-rw-r--r--drivers/net/forcedeth.c6
-rw-r--r--drivers/net/hp100.c7
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/iseries_veth.c27
-rw-r--r--drivers/net/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/lance.c3
-rw-r--r--drivers/net/lasi_82596.c3
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/lp486e.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c15
-rw-r--r--drivers/net/netx-eth.c516
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c3
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c5
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/vitesse.c112
-rw-r--r--drivers/net/r8169.c5
-rw-r--r--drivers/net/s2io.c64
-rw-r--r--drivers/net/seeq8005.c3
-rw-r--r--drivers/net/sis190.c3
-rw-r--r--drivers/net/sk98lin/skge.c2
-rw-r--r--drivers/net/skge.c3
-rw-r--r--drivers/net/sky2.c4
-rw-r--r--drivers/net/smc9194.c3
-rw-r--r--drivers/net/sonic.c3
-rw-r--r--drivers/net/starfire.c3
-rw-r--r--drivers/net/sundance.c8
-rw-r--r--drivers/net/tg3.c4
-rw-r--r--drivers/net/tokenring/olympic.c4
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/via-rhine.c7
-rw-r--r--drivers/net/via-velocity.c6
-rw-r--r--drivers/net/wan/c101.c6
-rw-r--r--drivers/net/wan/hdlc_generic.c24
-rw-r--r--drivers/net/wan/n2.c5
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/wanxl.c12
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx.h100
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c33
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_leds.c4
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c221
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_phy.c9
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_pio.c44
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_pio.h13
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c38
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_wx.c107
-rw-r--r--drivers/net/wireless/ipw2200.c41
-rw-r--r--drivers/net/wireless/ipw2200.h1
-rw-r--r--drivers/net/wireless/ray_cs.c3
-rw-r--r--drivers/net/wireless/wavelan.c18
-rw-r--r--drivers/net/wireless/wavelan_cs.c7
-rw-r--r--drivers/net/yellowfin.c12
-rw-r--r--drivers/net/znet.c3
-rw-r--r--drivers/oprofile/oprofilefs.c6
-rw-r--r--drivers/parport/Kconfig5
-rw-r--r--drivers/parport/parport_arc.c139
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c199
-rw-r--r--drivers/s390/net/qeth_eddp.c12
-rw-r--r--drivers/s390/net/qeth_main.c4
-rw-r--r--drivers/s390/net/qeth_tso.h2
-rw-r--r--drivers/scsi/53c700.h6
-rw-r--r--drivers/scsi/aacraid/aachba.c15
-rw-r--r--drivers/scsi/aacraid/commsup.c6
-rw-r--r--drivers/scsi/arm/queue.c6
-rw-r--r--drivers/scsi/ibmmca.c3
-rw-r--r--drivers/scsi/ide-scsi.c6
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/wd33c93.c4
-rw-r--r--drivers/serial/Kconfig19
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/amba-pl010.c2
-rw-r--r--drivers/serial/ioc4_serial.c9
-rw-r--r--drivers/serial/netx-serial.c749
-rw-r--r--drivers/serial/pxa.c1
-rw-r--r--drivers/sn/ioc4.c64
-rw-r--r--drivers/usb/core/inode.c6
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/gadget/file_storage.c2
-rw-r--r--drivers/usb/gadget/inode.c6
-rw-r--r--drivers/usb/host/Kconfig3
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/w1/Kconfig1
-rw-r--r--fs/9p/vfs_super.c21
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/adfs/super.c15
-rw-r--r--fs/affs/super.c12
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/mntpt.c11
-rw-r--r--fs/afs/super.c24
-rw-r--r--fs/aio.c4
-rw-r--r--fs/autofs/init.c6
-rw-r--r--fs/autofs4/init.c6
-rw-r--r--fs/befs/linuxvfs.c12
-rw-r--r--fs/bfs/inode.c9
-rw-r--r--fs/binfmt_elf.c347
-rw-r--r--fs/binfmt_elf_fdpic.c26
-rw-r--r--fs/binfmt_misc.c6
-rw-r--r--fs/block_dev.c6
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/cifs/cifsfs.c13
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/file.c26
-rw-r--r--fs/coda/file.c2
-rw-r--r--fs/coda/inode.c12
-rw-r--r--fs/coda/upcall.c4
-rw-r--r--fs/compat.c8
-rw-r--r--fs/configfs/mount.c6
-rw-r--r--fs/cramfs/inode.c15
-rw-r--r--fs/dcache.c40
-rw-r--r--fs/debugfs/inode.c8
-rw-r--r--fs/devfs/base.c8
-rw-r--r--fs/devpts/inode.c6
-rw-r--r--fs/direct-io.c18
-rw-r--r--fs/efs/super.c12
-rw-r--r--fs/eventpoll.c13
-rw-r--r--fs/ext2/dir.c3
-rw-r--r--fs/ext2/super.c36
-rw-r--r--fs/ext3/super.c70
-rw-r--r--fs/fat/inode.c8
-rw-r--r--fs/fat/misc.c1
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/freevxfs/vxfs_subr.c3
-rw-r--r--fs/freevxfs/vxfs_super.c20
-rw-r--r--fs/fs-writeback.c6
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/fuse/inode.c11
-rw-r--r--fs/hfs/bnode.c2
-rw-r--r--fs/hfs/btree.c2
-rw-r--r--fs/hfs/super.c11
-rw-r--r--fs/hfsplus/bitmap.c15
-rw-r--r--fs/hfsplus/bnode.c2
-rw-r--r--fs/hfsplus/btree.c2
-rw-r--r--fs/hfsplus/super.c12
-rw-r--r--fs/hostfs/hostfs_kern.c12
-rw-r--r--fs/hpfs/super.c10
-rw-r--r--fs/hppfs/hppfs_kern.c10
-rw-r--r--fs/hugetlbfs/inode.c31
-rw-r--r--fs/inotify_user.c6
-rw-r--r--fs/ioprio.c6
-rw-r--r--fs/isofs/inode.c13
-rw-r--r--fs/jbd/checkpoint.c419
-rw-r--r--fs/jbd/commit.c21
-rw-r--r--fs/jbd/transaction.c12
-rw-r--r--fs/jffs/inode-v23.c11
-rw-r--r--fs/jffs2/fs.c4
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/super.c49
-rw-r--r--fs/jfs/jfs_metapage.c5
-rw-r--r--fs/jfs/super.c11
-rw-r--r--fs/libfs.c16
-rw-r--r--fs/locks.c66
-rw-r--r--fs/minix/dir.c3
-rw-r--r--fs/minix/inode.c17
-rw-r--r--fs/mpage.c22
-rw-r--r--fs/msdos/namei.c9
-rw-r--r--fs/namei.c3
-rw-r--r--fs/namespace.c9
-rw-r--r--fs/ncpfs/inode.c11
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfs/inode.c101
-rw-r--r--fs/nfsd/nfs4xdr.c2
-rw-r--r--fs/nfsd/nfsctl.c6
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/ntfs/aops.h3
-rw-r--r--fs/ntfs/attrib.c6
-rw-r--r--fs/ntfs/file.c3
-rw-r--r--fs/ntfs/super.c14
-rw-r--r--fs/ocfs2/dlm/dlmfs.c6
-rw-r--r--fs/ocfs2/super.c22
-rw-r--r--fs/ocfs2/symlink.c3
-rw-r--r--fs/open.c28
-rw-r--r--fs/openpromfs/inode.c6
-rw-r--r--fs/partitions/check.c8
-rw-r--r--fs/pipe.c9
-rw-r--r--fs/proc/root.c6
-rw-r--r--fs/qnx4/inode.c13
-rw-r--r--fs/ramfs/inode.c13
-rw-r--r--fs/reiserfs/super.c17
-rw-r--r--fs/reiserfs/xattr.c3
-rw-r--r--fs/romfs/inode.c11
-rw-r--r--fs/select.c83
-rw-r--r--fs/smbfs/inode.c12
-rw-r--r--fs/smbfs/proc.c4
-rw-r--r--fs/smbfs/proto.h2
-rw-r--r--fs/splice.c46
-rw-r--r--fs/super.c111
-rw-r--r--fs/sync.c2
-rw-r--r--fs/sysfs/mount.c6
-rw-r--r--fs/sysv/dir.c3
-rw-r--r--fs/sysv/inode.c3
-rw-r--r--fs/sysv/super.c13
-rw-r--r--fs/udf/super.c12
-rw-r--r--fs/ufs/super.c9
-rw-r--r--fs/vfat/namei.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_stats.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c13
-rw-r--r--fs/xfs/linux-2.6/xfs_sysctl.c2
-rw-r--r--include/acpi/acconfig.h18
-rw-r--r--include/acpi/acdisasm.h173
-rw-r--r--include/acpi/acdispat.h6
-rw-r--r--include/acpi/acevents.h4
-rw-r--r--include/acpi/acexcep.h6
-rw-r--r--include/acpi/acglobal.h29
-rw-r--r--include/acpi/aclocal.h284
-rw-r--r--include/acpi/acmacros.h81
-rw-r--r--include/acpi/acnamesp.h28
-rw-r--r--include/acpi/acobject.h188
-rw-r--r--include/acpi/acopcode.h2
-rw-r--r--include/acpi/acoutput.h10
-rw-r--r--include/acpi/acparser.h4
-rw-r--r--include/acpi/acpi_bus.h10
-rw-r--r--include/acpi/acpi_numa.h23
-rw-r--r--include/acpi/acpiosxf.h35
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/acresrc.h20
-rw-r--r--include/acpi/acstruct.h121
-rw-r--r--include/acpi/actables.h6
-rw-r--r--include/acpi/actbl.h400
-rw-r--r--include/acpi/actbl1.h639
-rw-r--r--include/acpi/actbl2.h230
-rw-r--r--include/acpi/actypes.h88
-rw-r--r--include/acpi/acutils.h95
-rw-r--r--include/acpi/amlcode.h6
-rw-r--r--include/acpi/amlresrc.h85
-rw-r--r--include/acpi/pdc_intel.h5
-rw-r--r--include/acpi/platform/acenv.h47
-rw-r--r--include/acpi/platform/aclinux.h23
-rw-r--r--include/acpi/processor.h27
-rw-r--r--include/asm-alpha/irq.h4
-rw-r--r--include/asm-arm/arch-imx/imx-dma.h8
-rw-r--r--include/asm-arm/arch-ixp23xx/ixp23xx.h3
-rw-r--r--include/asm-arm/arch-ixp23xx/platform.h15
-rw-r--r--include/asm-arm/arch-netx/eth.h27
-rw-r--r--include/asm-arm/arch-pnx4008/debug-macro.S4
-rw-r--r--include/asm-arm/arch-pnx4008/gpio.h102
-rw-r--r--include/asm-arm/arch-pnx4008/pm.h29
-rw-r--r--include/asm-arm/arch-s3c2410/regs-dsc.h2
-rw-r--r--include/asm-arm/irq.h4
-rw-r--r--include/asm-arm/thread_notify.h48
-rw-r--r--include/asm-arm26/irq.h4
-rw-r--r--include/asm-frv/atomic.h4
-rw-r--r--include/asm-frv/checksum.h2
-rw-r--r--include/asm-frv/highmem.h2
-rw-r--r--include/asm-frv/io.h40
-rw-r--r--include/asm-frv/mb-regs.h27
-rw-r--r--include/asm-frv/signal.h6
-rw-r--r--include/asm-frv/uaccess.h64
-rw-r--r--include/asm-frv/unistd.h19
-rw-r--r--include/asm-generic/memory_model.h27
-rw-r--r--include/asm-h8300/irq.h4
-rw-r--r--include/asm-i386/alternative.h2
-rw-r--r--include/asm-i386/apic.h2
-rw-r--r--include/asm-i386/apicdef.h1
-rw-r--r--include/asm-i386/cpufeature.h12
-rw-r--r--include/asm-i386/mce.h5
-rw-r--r--include/asm-i386/mtrr.h4
-rw-r--r--include/asm-i386/processor.h14
-rw-r--r--include/asm-i386/uaccess.h35
-rw-r--r--include/asm-i386/unistd.h3
-rw-r--r--include/asm-ia64/io.h1
-rw-r--r--include/asm-ia64/mca.h9
-rw-r--r--include/asm-ia64/pgtable.h22
-rw-r--r--include/asm-ia64/sn/sn_sal.h2
-rw-r--r--include/asm-ia64/unistd.h2
-rw-r--r--include/asm-m68k/irq.h4
-rw-r--r--include/asm-m68k/processor.h8
-rw-r--r--include/asm-m68k/uaccess.h1084
-rw-r--r--include/asm-m68k/unistd.h39
-rw-r--r--include/asm-m68knommu/irq.h4
-rw-r--r--include/asm-m68knommu/ptrace.h2
-rw-r--r--include/asm-mips/mach-au1x00/au1xxx_psc.h9
-rw-r--r--include/asm-mips/mach-db1x00/db1x00.h12
-rw-r--r--include/asm-mips/mmzone.h1
-rw-r--r--include/asm-parisc/mmzone.h5
-rw-r--r--include/asm-powerpc/bitops.h6
-rw-r--r--include/asm-powerpc/cputable.h82
-rw-r--r--include/asm-powerpc/delay.h13
-rw-r--r--include/asm-powerpc/eeh.h15
-rw-r--r--include/asm-powerpc/eeh_event.h10
-rw-r--r--include/asm-powerpc/elf.h2
-rw-r--r--include/asm-powerpc/hvcall.h10
-rw-r--r--include/asm-powerpc/immap_86xx.h199
-rw-r--r--include/asm-powerpc/io.h6
-rw-r--r--include/asm-powerpc/iommu.h6
-rw-r--r--include/asm-powerpc/irq.h86
-rw-r--r--include/asm-powerpc/iseries/iommu.h (renamed from arch/powerpc/platforms/iseries/iommu.h)6
-rw-r--r--include/asm-powerpc/kdump.h29
-rw-r--r--include/asm-powerpc/kexec.h16
-rw-r--r--include/asm-powerpc/machdep.h5
-rw-r--r--include/asm-powerpc/mmu.h16
-rw-r--r--include/asm-powerpc/mmu_context.h12
-rw-r--r--include/asm-powerpc/mpc86xx.h47
-rw-r--r--include/asm-powerpc/mpic.h10
-rw-r--r--include/asm-powerpc/paca.h4
-rw-r--r--include/asm-powerpc/page.h11
-rw-r--r--include/asm-powerpc/pci-bridge.h14
-rw-r--r--include/asm-powerpc/pgtable-4k.h2
-rw-r--r--include/asm-powerpc/pgtable-64k.h2
-rw-r--r--include/asm-powerpc/pgtable.h10
-rw-r--r--include/asm-powerpc/processor.h16
-rw-r--r--include/asm-powerpc/prom.h9
-rw-r--r--include/asm-powerpc/ptrace.h2
-rw-r--r--include/asm-powerpc/reg.h69
-rw-r--r--include/asm-powerpc/rtas.h9
-rw-r--r--include/asm-powerpc/spu.h29
-rw-r--r--include/asm-powerpc/spu_csa.h13
-rw-r--r--include/asm-powerpc/spu_priv1.h182
-rw-r--r--include/asm-powerpc/systbl.h306
-rw-r--r--include/asm-powerpc/tce.h35
-rw-r--r--include/asm-powerpc/topology.h9
-rw-r--r--include/asm-powerpc/udbg.h3
-rw-r--r--include/asm-powerpc/vio.h16
-rw-r--r--include/asm-ppc/mmu.h23
-rw-r--r--include/asm-ppc/mmu_context.h27
-rw-r--r--include/asm-ppc/mpc85xx.h3
-rw-r--r--include/asm-ppc/pgtable.h2
-rw-r--r--include/asm-s390/irq.h4
-rw-r--r--include/asm-sparc/irq.h4
-rw-r--r--include/asm-v850/irq.h2
-rw-r--r--include/asm-x86_64/acpi.h2
-rw-r--r--include/asm-x86_64/apicdef.h2
-rw-r--r--include/asm-x86_64/mmzone.h1
-rw-r--r--include/asm-x86_64/numa.h1
-rw-r--r--include/asm-x86_64/unistd.h4
-rw-r--r--include/asm-xtensa/checksum.h3
-rw-r--r--include/asm-xtensa/rwsem.h7
-rw-r--r--include/asm-xtensa/uaccess.h34
-rw-r--r--include/linux/acpi.h9
-rw-r--r--include/linux/blkdev.h18
-rw-r--r--include/linux/blktrace_api.h6
-rw-r--r--include/linux/bootmem.h4
-rw-r--r--include/linux/cn_proc.h21
-rw-r--r--include/linux/coda_linux.h2
-rw-r--r--include/linux/coda_psdev.h2
-rw-r--r--include/linux/cpufreq.h4
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/delay.h5
-rw-r--r--include/linux/efi.h1
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/fs.h41
-rw-r--r--include/linux/genalloc.h35
-rw-r--r--include/linux/hdlc.h2
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/interrupt.h21
-rw-r--r--include/linux/ioc4.h5
-rw-r--r--include/linux/irq.h11
-rw-r--r--include/linux/irqreturn.h25
-rw-r--r--include/linux/jbd.h8
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/kexec.h1
-rw-r--r--include/linux/list.h23
-rw-r--r--include/linux/migrate.h20
-rw-r--r--include/linux/mm.h11
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/mount.h5
-rw-r--r--include/linux/netdevice.h23
-rw-r--r--include/linux/page-flags.h14
-rw-r--r--include/linux/pagemap.h7
-rw-r--r--include/linux/percpu_counter.h38
-rw-r--r--include/linux/prctl.h7
-rw-r--r--include/linux/ptrace.h4
-rw-r--r--include/linux/radix-tree.h5
-rw-r--r--include/linux/ramfs.h4
-rw-r--r--include/linux/rbtree.h4
-rw-r--r--include/linux/rcupdate.h3
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/sched.h13
-rw-r--r--include/linux/security.h45
-rw-r--r--include/linux/skbuff.h26
-rw-r--r--include/linux/slab.h50
-rw-r--r--include/linux/string.h1
-rw-r--r--include/linux/suspend.h1
-rw-r--r--include/linux/swap.h101
-rw-r--r--include/linux/swapops.h53
-rw-r--r--include/linux/syscalls.h10
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/uaccess.h22
-rw-r--r--include/linux/vmalloc.h8
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/zorro.h42
-rw-r--r--include/net/ieee80211.h5
-rw-r--r--include/net/protocol.h1
-rw-r--r--include/net/sock.h5
-rw-r--r--include/net/tcp.h6
-rw-r--r--ipc/mqueue.c10
-rw-r--r--ipc/shm.c3
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/auditsc.c1
-rw-r--r--kernel/compat.c23
-rw-r--r--kernel/cpuset.c16
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/futex.c8
-rw-r--r--kernel/irq/handle.c5
-rw-r--r--kernel/irq/migration.c4
-rw-r--r--kernel/irq/proc.c3
-rw-r--r--kernel/irq/spurious.c12
-rw-r--r--kernel/kexec.c6
-rw-r--r--kernel/ksysfs.c19
-rw-r--r--kernel/power/power.h6
-rw-r--r--kernel/power/snapshot.c260
-rw-r--r--kernel/power/swsusp.c32
-rw-r--r--kernel/rcupdate.c13
-rw-r--r--kernel/sched.c12
-rw-r--r--kernel/sys.c14
-rw-r--r--kernel/sys_ni.c2
-rw-r--r--kernel/sysctl.c11
-rw-r--r--kernel/timer.c30
-rw-r--r--kernel/workqueue.c4
-rw-r--r--lib/Makefile1
-rw-r--r--lib/genalloc.c263
-rw-r--r--lib/percpu_counter.c46
-rw-r--r--lib/radix-tree.c197
-rw-r--r--lib/string.c30
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/filemap.c183
-rw-r--r--mm/filemap.h6
-rw-r--r--mm/fremap.c9
-rw-r--r--mm/hugetlb.c282
-rw-r--r--mm/memory.c125
-rw-r--r--mm/memory_hotplug.c27
-rw-r--r--mm/mempolicy.c36
-rw-r--r--mm/migrate.c1058
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/mprotect.c37
-rw-r--r--mm/msync.c3
-rw-r--r--mm/oom_kill.c9
-rw-r--r--mm/page-writeback.c3
-rw-r--r--mm/page_alloc.c184
-rw-r--r--mm/pdflush.c3
-rw-r--r--mm/rmap.c107
-rw-r--r--mm/shmem.c18
-rw-r--r--mm/slab.c249
-rw-r--r--mm/sparse.c22
-rw-r--r--mm/swap.c42
-rw-r--r--mm/swapfile.c43
-rw-r--r--mm/truncate.c22
-rw-r--r--mm/vmalloc.c122
-rw-r--r--mm/vmscan.c240
-rw-r--r--net/bridge/br_forward.c4
-rw-r--r--net/bridge/br_if.c17
-rw-r--r--net/bridge/br_netfilter.c2
-rw-r--r--net/core/dev.c143
-rw-r--r--net/core/ethtool.c29
-rw-r--r--net/core/link_watch.c5
-rw-r--r--net/core/skbuff.c178
-rw-r--r--net/dccp/Kconfig2
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c2
-rw-r--r--net/ipv4/af_inet.c51
-rw-r--r--net/ipv4/ip_output.c16
-rw-r--r--net/ipv4/tcp.c66
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_output.c47
-rw-r--r--net/ipv4/xfrm4_output.c54
-rw-r--r--net/ipv6/addrconf.c9
-rw-r--r--net/ipv6/ip6_output.c7
-rw-r--r--net/ipv6/xfrm6_output.c39
-rw-r--r--net/sched/sch_generic.c29
-rw-r--r--net/socket.c7
-rw-r--r--net/sunrpc/rpc_pipe.c6
-rw-r--r--security/dummy.c14
-rw-r--r--security/inode.c8
-rw-r--r--security/selinux/hooks.c18
-rw-r--r--security/selinux/selinuxfs.c7
-rw-r--r--sound/oss/au1550_ac97.c2
-rw-r--r--sound/oss/cs46xx.c1272
1002 files changed, 30357 insertions, 16124 deletions
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index ce5d2c038cf5..6d2412ec91ed 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -155,7 +155,83 @@ problem, which is called the function-growth-hormone-imbalance syndrome.
See next chapter.
- Chapter 5: Functions
+ Chapter 5: Typedefs
+
+Please don't use things like "vps_t".
+
+It's a _mistake_ to use typedef for structures and pointers. When you see a
+
+ vps_t a;
+
+in the source, what does it mean?
+
+In contrast, if it says
+
+ struct virtual_container *a;
+
+you can actually tell what "a" is.
+
+Lots of people think that typedefs "help readability". Not so. They are
+useful only for:
+
+ (a) totally opaque objects (where the typedef is actively used to _hide_
+ what the object is).
+
+ Example: "pte_t" etc. opaque objects that you can only access using
+ the proper accessor functions.
+
+ NOTE! Opaqueness and "accessor functions" are not good in themselves.
+ The reason we have them for things like pte_t etc. is that there
+ really is absolutely _zero_ portably accessible information there.
+
+ (b) Clear integer types, where the abstraction _helps_ avoid confusion
+ whether it is "int" or "long".
+
+ u8/u16/u32 are perfectly fine typedefs, although they fit into
+ category (d) better than here.
+
+ NOTE! Again - there needs to be a _reason_ for this. If something is
+ "unsigned long", then there's no reason to do
+
+ typedef unsigned long myflags_t;
+
+ but if there is a clear reason for why it under certain circumstances
+ might be an "unsigned int" and under other configurations might be
+ "unsigned long", then by all means go ahead and use a typedef.
+
+ (c) when you use sparse to literally create a _new_ type for
+ type-checking.
+
+ (d) New types which are identical to standard C99 types, in certain
+ exceptional circumstances.
+
+ Although it would only take a short amount of time for the eyes and
+ brain to become accustomed to the standard types like 'uint32_t',
+ some people object to their use anyway.
+
+ Therefore, the Linux-specific 'u8/u16/u32/u64' types and their
+ signed equivalents which are identical to standard types are
+ permitted -- although they are not mandatory in new code of your
+ own.
+
+ When editing existing code which already uses one or the other set
+ of types, you should conform to the existing choices in that code.
+
+ (e) Types safe for use in userspace.
+
+ In certain structures which are visible to userspace, we cannot
+ require C99 types and cannot use the 'u32' form above. Thus, we
+ use __u32 and similar types in all structures which are shared
+ with userspace.
+
+Maybe there are other cases too, but the rule should basically be to NEVER
+EVER use a typedef unless you can clearly match one of those rules.
+
+In general, a pointer, or a struct that has elements that can reasonably
+be directly accessed should _never_ be a typedef.
+
+
+ Chapter 6: Functions
Functions should be short and sweet, and do just one thing. They should
fit on one or two screenfuls of text (the ISO/ANSI screen size is 80x24,
@@ -183,7 +259,7 @@ and it gets confused. You know you're brilliant, but maybe you'd like
to understand what you did 2 weeks from now.
- Chapter 6: Centralized exiting of functions
+ Chapter 7: Centralized exiting of functions
Albeit deprecated by some people, the equivalent of the goto statement is
used frequently by compilers in form of the unconditional jump instruction.
@@ -220,7 +296,7 @@ out:
return result;
}
- Chapter 7: Commenting
+ Chapter 8: Commenting
Comments are good, but there is also a danger of over-commenting. NEVER
try to explain HOW your code works in a comment: it's much better to
@@ -240,7 +316,7 @@ When commenting the kernel API functions, please use the kerneldoc format.
See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc
for details.
- Chapter 8: You've made a mess of it
+ Chapter 9: You've made a mess of it
That's OK, we all do. You've probably been told by your long-time Unix
user helper that "GNU emacs" automatically formats the C sources for
@@ -288,7 +364,7 @@ re-formatting you may want to take a look at the man page. But
remember: "indent" is not a fix for bad programming.
- Chapter 9: Configuration-files
+ Chapter 10: Configuration-files
For configuration options (arch/xxx/Kconfig, and all the Kconfig files),
somewhat different indentation is used.
@@ -313,7 +389,7 @@ support for file-systems, for instance) should be denoted (DANGEROUS), other
experimental options should be denoted (EXPERIMENTAL).
- Chapter 10: Data structures
+ Chapter 11: Data structures
Data structures that have visibility outside the single-threaded
environment they are created and destroyed in should always have
@@ -344,7 +420,7 @@ Remember: if another thread can find your data structure, and you don't
have a reference count on it, you almost certainly have a bug.
- Chapter 11: Macros, Enums and RTL
+ Chapter 12: Macros, Enums and RTL
Names of macros defining constants and labels in enums are capitalized.
@@ -399,7 +475,7 @@ The cpp manual deals with macros exhaustively. The gcc internals manual also
covers RTL which is used frequently with assembly language in the kernel.
- Chapter 12: Printing kernel messages
+ Chapter 13: Printing kernel messages
Kernel developers like to be seen as literate. Do mind the spelling
of kernel messages to make a good impression. Do not use crippled
@@ -410,7 +486,7 @@ Kernel messages do not have to be terminated with a period.
Printing numbers in parentheses (%d) adds no value and should be avoided.
- Chapter 13: Allocating memory
+ Chapter 14: Allocating memory
The kernel provides the following general purpose memory allocators:
kmalloc(), kzalloc(), kcalloc(), and vmalloc(). Please refer to the API
@@ -429,7 +505,7 @@ from void pointer to any other pointer type is guaranteed by the C programming
language.
- Chapter 14: The inline disease
+ Chapter 15: The inline disease
There appears to be a common misperception that gcc has a magic "make me
faster" speedup option called "inline". While the use of inlines can be
@@ -457,7 +533,7 @@ something it would have done anyway.
- Chapter 15: References
+ Appendix I: References
The C Programming Language, Second Edition
by Brian W. Kernighan and Dennis M. Ritchie.
@@ -481,4 +557,4 @@ Kernel CodingStyle, by greg@kroah.com at OLS 2002:
http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
--
-Last updated on 30 December 2005 by a community effort on LKML.
+Last updated on 30 April 2006.
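As a rough illustration of the typedef rules the new chapter adds, here is a small standalone C sketch (not part of the patch) contrasting rule (c) -- a sparse-checked new type -- with the closing rule that a directly-accessed struct should not hide behind a typedef. The names example_flags_t, struct virtual_container and use_container are made up for this example; __bitwise is the sparse annotation the kernel uses for such types, and it compiles away under a normal compiler.

#include <stdio.h>

/* Rule (c): with sparse, a typedef can create a genuinely new type. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#else
#define __bitwise
#endif

typedef unsigned int __bitwise example_flags_t;

/*
 * The closing rule: a struct whose members are meant to be accessed
 * directly should be spelled out, not hidden behind a typedef.
 */
struct virtual_container {
	int id;
};

static void use_container(struct virtual_container *a)
{
	/* The type is visible at the call site, so this access is obvious. */
	printf("container %d\n", a->id);
}

int main(void)
{
	struct virtual_container vc = { .id = 1 };

	use_container(&vc);
	return 0;
}

Under sparse, mixing an example_flags_t with a plain unsigned int is flagged; under gcc the typedef behaves like an ordinary unsigned int, so the extra type-checking costs nothing at runtime.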
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index ca02e04a906c..31b727ceb127 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -117,6 +117,7 @@ X!Ilib/string.c
<chapter id="mm">
<title>Memory Management in Linux</title>
<sect1><title>The Slab Cache</title>
+!Iinclude/linux/slab.h
!Emm/slab.c
</sect1>
<sect1><title>User Space Memory Access</title>
@@ -331,6 +332,18 @@ X!Earch/i386/kernel/mca.c
!Esecurity/security.c
</chapter>
+ <chapter id="audit">
+ <title>Audit Interfaces</title>
+!Ekernel/audit.c
+!Ikernel/auditsc.c
+!Ikernel/auditfilter.c
+ </chapter>
+
+ <chapter id="accounting">
+ <title>Accounting Framework</title>
+!Ikernel/acct.c
+ </chapter>
+
<chapter id="pmfuncs">
<title>Power Management</title>
!Ekernel/power/pm.c
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 07cb93b82ba9..6e459420ee9f 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -790,7 +790,6 @@ RCU pointer update:
RCU grace period:
- synchronize_kernel (deprecated)
synchronize_net
synchronize_sched
synchronize_rcu
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
new file mode 100644
index 000000000000..8230098da529
--- /dev/null
+++ b/Documentation/SubmitChecklist
@@ -0,0 +1,57 @@
+Linux Kernel patch submittal checklist
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here are some basic things that developers should do if they
+want to see their kernel patch submittals accepted more quickly.
+
+These are all above and beyond the documentation that is provided
+in Documentation/SubmittingPatches and elsewhere about submitting
+Linux kernel patches.
+
+
+
+- Builds cleanly with applicable or modified CONFIG options =y, =m, and =n.
+ No gcc warnings/errors, no linker warnings/errors.
+
+- Passes allnoconfig, allmodconfig
+
+- Builds on multiple CPU architectures by using local cross-compile tools
+ or something like PLM at OSDL.
+
+- ppc64 is a good architecture for cross-compilation checking because it
+ tends to use `unsigned long' for 64-bit quantities.
+
+- Matches kernel coding style(!)
+
+- Any new or modified CONFIG options don't muck up the config menu.
+
+- All new Kconfig options have help text.
+
+- Has been carefully reviewed with respect to relevant Kconfig
+ combinations. This is very hard to get right with testing --
+ brainpower pays off here.
+
+- Checks cleanly with sparse.
+
+- Use 'make checkstack' and 'make namespacecheck' and fix any
+ problems that they find. Note: checkstack does not point out
+ problems explicitly, but any one function that uses more than
+ 512 bytes on the stack is a candidate for change.
+
+- Include kernel-doc to document global kernel APIs. (Not required
+ for static functions, but OK there also.) Use 'make htmldocs'
+ or 'make mandocs' to check the kernel-doc and fix any issues.
+
+- Has been tested with CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT,
+ CONFIG_DEBUG_SLAB, CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES,
+ CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_SPINLOCK_SLEEP all simultaneously
+ enabled.
+
+- Has been build- and runtime tested with and without CONFIG_SMP and
+ CONFIG_PREEMPT.
+
+- If the patch affects IO/Disk, etc: has been tested with and without
+ CONFIG_LBD.
+
+
+2006-APR-27
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index b369a8c46a73..b2f593fc76ca 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -3,7 +3,7 @@
Maintained by Torben Mathiasen <device@lanana.org>
- Last revised: 25 January 2005
+ Last revised: 01 March 2006
This list is the Linux Device List, the official registry of allocated
device numbers and /dev directory nodes for the Linux operating
@@ -94,7 +94,6 @@ Your cooperation is appreciated.
9 = /dev/urandom Faster, less secure random number gen.
10 = /dev/aio Asynchronous I/O notification interface
11 = /dev/kmsg Writes to this come out as printk's
- 12 = /dev/oldmem Access to crash dump from kexec kernel
1 block RAM disk
0 = /dev/ram0 First RAM disk
1 = /dev/ram1 Second RAM disk
@@ -262,13 +261,13 @@ Your cooperation is appreciated.
NOTE: These devices permit both read and write access.
7 block Loopback devices
- 0 = /dev/loop0 First loopback device
- 1 = /dev/loop1 Second loopback device
+ 0 = /dev/loop0 First loop device
+ 1 = /dev/loop1 Second loop device
...
- The loopback devices are used to mount filesystems not
+ The loop devices are used to mount filesystems not
associated with block devices. The binding to the
- loopback devices is handled by mount(8) or losetup(8).
+ loop devices is handled by mount(8) or losetup(8).
8 block SCSI disk devices (0-15)
0 = /dev/sda First SCSI disk whole disk
@@ -943,7 +942,7 @@ Your cooperation is appreciated.
240 = /dev/ftlp FTL on 16th Memory Technology Device
Partitions are handled in the same way as for IDE
- disks (see major number 3) expect that the partition
+ disks (see major number 3) except that the partition
limit is 15 rather than 63 per disk (same as SCSI.)
45 char isdn4linux ISDN BRI driver
@@ -1168,7 +1167,7 @@ Your cooperation is appreciated.
The filename of the encrypted container and the passwords
are sent via ioctls (using the sdmount tool) to the master
node which then activates them via one of the
- /dev/scramdisk/x nodes for loopback mounting (all handled
+ /dev/scramdisk/x nodes for loop mounting (all handled
through the sdmount tool).
Requested by: andy@scramdisklinux.org
@@ -2538,18 +2537,32 @@ Your cooperation is appreciated.
0 = /dev/usb/lp0 First USB printer
...
15 = /dev/usb/lp15 16th USB printer
- 16 = /dev/usb/mouse0 First USB mouse
- ...
- 31 = /dev/usb/mouse15 16th USB mouse
- 32 = /dev/usb/ez0 First USB firmware loader
- ...
- 47 = /dev/usb/ez15 16th USB firmware loader
48 = /dev/usb/scanner0 First USB scanner
...
63 = /dev/usb/scanner15 16th USB scanner
64 = /dev/usb/rio500 Diamond Rio 500
65 = /dev/usb/usblcd USBLCD Interface (info@usblcd.de)
66 = /dev/usb/cpad0 Synaptics cPad (mouse/LCD)
+ 96 = /dev/usb/hiddev0 1st USB HID device
+ ...
+ 111 = /dev/usb/hiddev15 16th USB HID device
+ 112 = /dev/usb/auer0 1st auerswald ISDN device
+ ...
+ 127 = /dev/usb/auer15 16th auerswald ISDN device
+ 128 = /dev/usb/brlvgr0 First Braille Voyager device
+ ...
+ 131 = /dev/usb/brlvgr3 Fourth Braille Voyager device
+ 132 = /dev/usb/idmouse ID Mouse (fingerprint scanner) device
+ 133 = /dev/usb/sisusbvga1 First SiSUSB VGA device
+ ...
+ 140 = /dev/usb/sisusbvga8 Eighth SiSUSB VGA device
+ 144 = /dev/usb/lcd USB LCD device
+ 160 = /dev/usb/legousbtower0 1st USB Legotower device
+ ...
+ 175 = /dev/usb/legousbtower15 16th USB Legotower device
+ 240 = /dev/usb/dabusb0 First dabusb device
+ ...
+ 243 = /dev/usb/dabusb3 Fourth dabusb device
180 block USB block devices
0 = /dev/uba First USB block device
@@ -2710,6 +2723,17 @@ Your cooperation is appreciated.
1 = /dev/cpu/1/msr MSRs on CPU 1
...
+202 block Xen Virtual Block Device
+ 0 = /dev/xvda First Xen VBD whole disk
+ 16 = /dev/xvdb Second Xen VBD whole disk
+ 32 = /dev/xvdc Third Xen VBD whole disk
+ ...
+ 240 = /dev/xvdp Sixteenth Xen VBD whole disk
+
+ Partitions are handled in the same way as for IDE
+ disks (see major number 3) except that the limit on
+ partitions is 15.
+
203 char CPU CPUID information
0 = /dev/cpu/0/cpuid CPUID on CPU 0
1 = /dev/cpu/1/cpuid CPUID on CPU 1
@@ -2747,11 +2771,26 @@ Your cooperation is appreciated.
46 = /dev/ttyCPM0 PPC CPM (SCC or SMC) - port 0
...
47 = /dev/ttyCPM5 PPC CPM (SCC or SMC) - port 5
- 50 = /dev/ttyIOC40 Altix serial card
+ 50 = /dev/ttyIOC0 Altix serial card
+ ...
+ 81 = /dev/ttyIOC31 Altix serial card
+ 82 = /dev/ttyVR0 NEC VR4100 series SIU
+ 83 = /dev/ttyVR1 NEC VR4100 series DSIU
+ 84 = /dev/ttyIOC84 Altix ioc4 serial card
+ ...
+ 115 = /dev/ttyIOC115 Altix ioc4 serial card
+ 116 = /dev/ttySIOC0 Altix ioc3 serial card
+ ...
+ 147 = /dev/ttySIOC31 Altix ioc3 serial card
+ 148 = /dev/ttyPSC0 PPC PSC - port 0
+ ...
+ 153 = /dev/ttyPSC5 PPC PSC - port 5
+ 154 = /dev/ttyAT0 ATMEL serial port 0
...
- 81 = /dev/ttyIOC431 Altix serial card
- 82 = /dev/ttyVR0 NEC VR4100 series SIU
- 83 = /dev/ttyVR1 NEC VR4100 series DSIU
+ 169 = /dev/ttyAT15 ATMEL serial port 15
+ 170 = /dev/ttyNX0 Hilscher netX serial port 0
+ ...
+ 185 = /dev/ttyNX15 Hilscher netX serial port 15
205 char Low-density serial ports (alternate device)
0 = /dev/culu0 Callout device for ttyLU0
@@ -2786,8 +2825,8 @@ Your cooperation is appreciated.
50 = /dev/cuioc40 Callout device for ttyIOC40
...
81 = /dev/cuioc431 Callout device for ttyIOC431
- 82 = /dev/cuvr0 Callout device for ttyVR0
- 83 = /dev/cuvr1 Callout device for ttyVR1
+ 82 = /dev/cuvr0 Callout device for ttyVR0
+ 83 = /dev/cuvr1 Callout device for ttyVR1
206 char OnStream SC-x0 tape devices
@@ -2897,7 +2936,6 @@ Your cooperation is appreciated.
...
196 = /dev/dvb/adapter3/video0 first video decoder of fourth card
-
216 char Bluetooth RFCOMM TTY devices
0 = /dev/rfcomm0 First Bluetooth RFCOMM TTY device
1 = /dev/rfcomm1 Second Bluetooth RFCOMM TTY device
@@ -3002,12 +3040,43 @@ Your cooperation is appreciated.
ioctl()'s can be used to rewind the tape regardless of
the device used to access it.
-231 char InfiniBand MAD
+231 char InfiniBand
0 = /dev/infiniband/umad0
1 = /dev/infiniband/umad1
- ...
+ ...
+ 63 = /dev/infiniband/umad63 63rd InfiniBand MAD device
+ 64 = /dev/infiniband/issm0 First InfiniBand IsSM device
+ 65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
+ ...
+ 127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
+ 128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+ 129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+ ...
+ 159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+
+232 char Biometric Devices
+ 0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
+ 1 = /dev/biometric/sensor0/iris first iris sensor on first device
+ 2 = /dev/biometric/sensor0/retina first retina sensor on first device
+ 3 = /dev/biometric/sensor0/voiceprint first voiceprint sensor on first device
+ 4 = /dev/biometric/sensor0/facial first facial sensor on first device
+ 5 = /dev/biometric/sensor0/hand first hand sensor on first device
+ ...
+ 10 = /dev/biometric/sensor1/fingerprint first fingerprint sensor on second device
+ ...
+ 20 = /dev/biometric/sensor2/fingerprint first fingerprint sensor on third device
+ ...
-232-239 UNASSIGNED
+233 char PathScale InfiniPath interconnect
+ 0 = /dev/ipath Primary device for programs (any unit)
+ 1 = /dev/ipath0 Access specifically to unit 0
+ 2 = /dev/ipath1 Access specifically to unit 1
+ ...
+ 4 = /dev/ipath3 Access specifically to unit 3
+ 129 = /dev/ipath_sma Device used by Subnet Management Agent
+ 130 = /dev/ipath_diag Device used by diagnostics programs
+
+234-239 UNASSIGNED
240-254 char LOCAL/EXPERIMENTAL USE
240-254 block LOCAL/EXPERIMENTAL USE
@@ -3021,6 +3090,24 @@ Your cooperation is appreciated.
This major is reserved to assist the expansion to a
larger number space. No device nodes with this major
should ever be created on the filesystem.
+ (This is probably not true anymore, but I'll leave it
+ for now /Torben)
+
+---LARGE MAJORS!!!!!---
+
+256 char Equinox SST multi-port serial boards
+ 0 = /dev/ttyEQ0 First serial port on first Equinox SST board
+ 127 = /dev/ttyEQ127 Last serial port on first Equinox SST board
+ 128 = /dev/ttyEQ128 First serial port on second Equinox SST board
+ ...
+ 1027 = /dev/ttyEQ1027 Last serial port on eighth Equinox SST board
+
+256 block Resident Flash Disk Flash Translation Layer
+ 0 = /dev/rfda First RFD FTL layer
+ 16 = /dev/rfdb Second RFD FTL layer
+ ...
+ 240 = /dev/rfdp 16th RFD FTL layer
+
**** ADDITIONAL /dev DIRECTORY ENTRIES
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index f7293297f326..027285d0c26c 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -33,21 +33,6 @@ Who: Adrian Bunk <bunk@stusta.de>
---------------------------
-What: RCU API moves to EXPORT_SYMBOL_GPL
-When: April 2006
-Files: include/linux/rcupdate.h, kernel/rcupdate.c
-Why: Outside of Linux, the only implementations of anything even
- vaguely resembling RCU that I am aware of are in DYNIX/ptx,
- VM/XA, Tornado, and K42. I do not expect anyone to port binary
- drivers or kernel modules from any of these, since the first two
- are owned by IBM and the last two are open-source research OSes.
- So these will move to GPL after a grace period to allow
- people, who might be using implementations that I am not aware
- of, to adjust to this upcoming change.
-Who: Paul E. McKenney <paulmck@us.ibm.com>
-
----------------------------
-
What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
When: November 2006
Why: Deprecated in favour of the new ioctl-based rawiso interface, which is
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 1045da582b9b..d31efbbdfe50 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -99,7 +99,7 @@ prototypes:
int (*sync_fs)(struct super_block *sb, int wait);
void (*write_super_lockfs) (struct super_block *);
void (*unlockfs) (struct super_block *);
- int (*statfs) (struct super_block *, struct kstatfs *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
@@ -142,15 +142,16 @@ see also dquot_operations section.
--------------------------- file_system_type ---------------------------
prototypes:
- struct super_block *(*get_sb) (struct file_system_type *, int,
- const char *, void *);
+ int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
locking rules:
may block BKL
get_sb yes yes
kill_sb yes yes
-->get_sb() returns error or a locked superblock (exclusive on ->s_umount).
+->get_sb() returns error or 0 with locked superblock attached to the vfsmount
+(exclusive on ->s_umount).
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 2f388460cbe7..5531694059ab 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -50,10 +50,11 @@ Turn your foo_read_super() into a function that would return 0 in case of
success and negative number in case of error (-EINVAL unless you have more
informative error value to report). Call it foo_fill_super(). Now declare
-struct super_block foo_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+int foo_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, foo_fill_super,
+ mnt);
}
(or similar with s/bdev/nodev/ or s/bdev/single/, depending on the kind of
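For illustration, a minimal sketch of a filesystem converted to the new
->get_sb() prototype described above (not taken from the patch; the "foo"
names are placeholders):

    #include <linux/fs.h>
    #include <linux/module.h>

    static int foo_fill_super(struct super_block *sb, void *data, int silent)
    {
            /* set up sb->s_op, the root inode and dentry, etc. */
            return 0;               /* or a negative errno on failure */
    }

    static int foo_get_sb(struct file_system_type *fs_type, int flags,
                          const char *dev_name, void *data,
                          struct vfsmount *mnt)
    {
            /* get_sb_bdev() now attaches the superblock to the vfsmount */
            return get_sb_bdev(fs_type, flags, dev_name, data,
                               foo_fill_super, mnt);
    }

    static struct file_system_type foo_fs_type = {
            .owner   = THIS_MODULE,
            .name    = "foo",
            .get_sb  = foo_get_sb,
            .kill_sb = kill_block_super,
    };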
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 3a2e5520c1e3..9d3aed628bc1 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -113,8 +113,8 @@ members are defined:
struct file_system_type {
const char *name;
int fs_flags;
- struct super_block *(*get_sb) (struct file_system_type *, int,
- const char *, void *);
+ int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
@@ -211,7 +211,7 @@ struct super_operations {
int (*sync_fs)(struct super_block *sb, int wait);
void (*write_super_lockfs) (struct super_block *);
void (*unlockfs) (struct super_block *);
- int (*statfs) (struct super_block *, struct kstatfs *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
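As a sketch of the new ->statfs() prototype shown above, an implementation
now reaches the superblock through the dentry (the "foo" names are
placeholders and not part of the patch):

    #include <linux/fs.h>
    #include <linux/statfs.h>

    /* Illustrative only: "foo" is a placeholder filesystem. */
    static int foo_statfs(struct dentry *dentry, struct kstatfs *buf)
    {
            struct super_block *sb = dentry->d_sb;

            buf->f_type    = sb->s_magic;
            buf->f_bsize   = sb->s_blocksize;
            buf->f_namelen = 255;
            /* fill in f_blocks, f_bfree, f_bavail, f_files, f_ffree ... */
            return 0;
    }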
diff --git a/Documentation/ia64/aliasing.txt b/Documentation/ia64/aliasing.txt
new file mode 100644
index 000000000000..38f9a52d1820
--- /dev/null
+++ b/Documentation/ia64/aliasing.txt
@@ -0,0 +1,208 @@
+ MEMORY ATTRIBUTE ALIASING ON IA-64
+
+ Bjorn Helgaas
+ <bjorn.helgaas@hp.com>
+ May 4, 2006
+
+
+MEMORY ATTRIBUTES
+
+ Itanium supports several attributes for virtual memory references.
+ The attribute is part of the virtual translation, i.e., it is
+ contained in the TLB entry. The ones of most interest to the Linux
+ kernel are:
+
+ WB Write-back (cacheable)
+ UC Uncacheable
+ WC Write-coalescing
+
+ System memory typically uses the WB attribute. The UC attribute is
+ used for memory-mapped I/O devices. The WC attribute is uncacheable
+ like UC is, but writes may be delayed and combined to increase
+ performance for things like frame buffers.
+
+ The Itanium architecture requires that we avoid accessing the same
+ page with both a cacheable mapping and an uncacheable mapping[1].
+
+ The design of the chipset determines which attributes are supported
+ on which regions of the address space. For example, some chipsets
+ support either WB or UC access to main memory, while others support
+ only WB access.
+
+MEMORY MAP
+
+ Platform firmware describes the physical memory map and the
+ supported attributes for each region. At boot-time, the kernel uses
+ the EFI GetMemoryMap() interface. ACPI can also describe memory
+ devices and the attributes they support, but Linux/ia64 currently
+ doesn't use this information.
+
+ The kernel uses the efi_memmap table returned from GetMemoryMap() to
+ learn the attributes supported by each region of physical address
+ space. Unfortunately, this table does not completely describe the
+ address space because some machines omit some or all of the MMIO
+ regions from the map.
+
+ The kernel maintains another table, kern_memmap, which describes the
+ memory Linux is actually using and the attribute for each region.
+ This contains only system memory; it does not contain MMIO space.
+
+ The kern_memmap table typically contains only a subset of the system
+ memory described by the efi_memmap. Linux/ia64 can't use all memory
+ in the system because of constraints imposed by the identity mapping
+ scheme.
+
+ The efi_memmap table is preserved unmodified because the original
+ boot-time information is required for kexec.
+
+KERNEL IDENTITY MAPPINGS
+
+ Linux/ia64 identity mappings are done with large pages, currently
+ either 16MB or 64MB, referred to as "granules." Cacheable mappings
+ are speculative[2], so the processor can read any location in the
+ page at any time, independent of the programmer's intentions. This
+ means that to avoid attribute aliasing, Linux can create a cacheable
+ identity mapping only when the entire granule supports cacheable
+ access.
+
+ Therefore, kern_memmap contains only full granule-sized regions that
+ can be referenced safely by an identity mapping.
+
+ Uncacheable mappings are not speculative, so the processor will
+ generate UC accesses only to locations explicitly referenced by
+ software. This allows UC identity mappings to cover granules that
+ are only partially populated, or populated with a combination of UC
+ and WB regions.
+
+USER MAPPINGS
+
+ User mappings are typically done with 16K or 64K pages. The smaller
+ page size allows more flexibility because only 16K or 64K has to be
+ homogeneous with respect to memory attributes.
+
+POTENTIAL ATTRIBUTE ALIASING CASES
+
+ There are several ways the kernel creates new mappings:
+
+ mmap of /dev/mem
+
+ This uses remap_pfn_range(), which creates user mappings. These
+ mappings may be either WB or UC. If the region being mapped
+ happens to be in kern_memmap, meaning that it may also be mapped
+ by a kernel identity mapping, the user mapping must use the same
+ attribute as the kernel mapping.
+
+ If the region is not in kern_memmap, the user mapping should use
+ an attribute reported as being supported in the EFI memory map.
+
+ Since the EFI memory map does not describe MMIO on some
+ machines, this should use an uncacheable mapping as a fallback.
+
+ mmap of /sys/class/pci_bus/.../legacy_mem
+
+ This is very similar to mmap of /dev/mem, except that legacy_mem
+ only allows mmap of the one megabyte "legacy MMIO" area for a
+ specific PCI bus. Typically this is the first megabyte of
+ physical address space, but it may be different on machines with
+ several VGA devices.
+
+ "X" uses this to access VGA frame buffers. Using legacy_mem
+ rather than /dev/mem allows multiple instances of X to talk to
+ different VGA cards.
+
+ The /dev/mem mmap constraints apply.
+
+ However, since this is for mapping legacy MMIO space, WB access
+ does not make sense. This matters on machines without legacy
+ VGA support: these machines may have WB memory for the entire
+ first megabyte (or even the entire first granule).
+
+ On these machines, we could mmap legacy_mem as WB, which would
+ be safe in terms of attribute aliasing, but X has no way of
+ knowing that it is accessing regular memory, not a frame buffer,
+ so the kernel should fail the mmap rather than doing it with WB.
+
+ read/write of /dev/mem
+
+ This uses copy_from_user(), which implicitly uses a kernel
+ identity mapping. This is obviously safe for things in
+ kern_memmap.
+
+ There may be corner cases of things that are not in kern_memmap,
+ but could be accessed this way. For example, registers in MMIO
+ space are not in kern_memmap, but could be accessed with a UC
+ mapping. This would not cause attribute aliasing. But
+ registers typically can be accessed only with four-byte or
+ eight-byte accesses, and the copy_from_user() path doesn't allow
+ any control over the access size, so this would be dangerous.
+
+ ioremap()
+
+ This returns a kernel identity mapping for use inside the
+ kernel.
+
+ If the region is in kern_memmap, we should use the attribute
+ specified there. Otherwise, if the EFI memory map reports that
+ the entire granule supports WB, we should use that (granules
+ that are partially reserved or occupied by firmware do not appear
+ in kern_memmap). Otherwise, we should use a UC mapping.
+
+PAST PROBLEM CASES
+
+ mmap of various MMIO regions from /dev/mem by "X" on Intel platforms
+
+ The EFI memory map may not report these MMIO regions.
+
+ These must be allowed so that X will work. This means that
+ when the EFI memory map is incomplete, every /dev/mem mmap must
+ succeed. It may create either WB or UC user mappings, depending
+ on whether the region is in kern_memmap or the EFI memory map.
+
+ mmap of 0x0-0xA0000 /dev/mem by "hwinfo" on HP sx1000 with VGA enabled
+
+ See https://bugzilla.novell.com/show_bug.cgi?id=140858.
+
+ The EFI memory map reports the following attributes:
+ 0x00000-0x9FFFF WB only
+ 0xA0000-0xBFFFF UC only (VGA frame buffer)
+ 0xC0000-0xFFFFF WB only
+
+ This mmap is done with user pages, not kernel identity mappings,
+ so it is safe to use WB mappings.
+
+ The kernel VGA driver may ioremap the VGA frame buffer at 0xA0000,
+ which will use a granule-sized UC mapping covering 0-0xFFFFF. This
+ granule covers some WB-only memory, but since UC is non-speculative,
+ the processor will never generate an uncacheable reference to the
+ WB-only areas unless the driver explicitly touches them.
+
+ mmap of 0x0-0xFFFFF legacy_mem by "X"
+
+ If the EFI memory map reports this entire range as WB, there
+ is no VGA MMIO hole, and the mmap should fail or be done with
+ a WB mapping.
+
+ There's no easy way for X to determine whether the 0xA0000-0xBFFFF
+ region is a frame buffer or just memory, so I think it's best to
+ just fail this mmap request rather than using a WB mapping. As
+ far as I know, there's no need to map legacy_mem with WB
+ mappings.
+
+ Otherwise, a UC mapping of the entire region is probably safe.
+ The VGA hole means the region will not be in kern_memmap. The
+ HP sx1000 chipset doesn't support UC access to the memory surrounding
+ the VGA hole, but X doesn't need that area anyway and should not
+ reference it.
+
+ mmap of 0xA0000-0xBFFFF legacy_mem by "X" on HP sx1000 with VGA disabled
+
+ The EFI memory map reports the following attributes:
+ 0x00000-0xFFFFF WB only (no VGA MMIO hole)
+
+ This is a special case of the previous case, and the mmap should
+ fail for the same reason as above.
+
+NOTES
+
+ [1] SDM rev 2.2, vol 2, sec 4.4.1.
+ [2] SDM rev 2.2, vol 2, sec 4.4.6.
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 171a44ebd939..1543802ef53e 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -85,7 +85,9 @@ Code Seq# Include File Comments
<mailto:maassen@uni-freiburg.de>
'C' all linux/soundcard.h
'D' all asm-s390/dasd.h
+'E' all linux/input.h
'F' all linux/fb.h
+'H' all linux/hiddev.h
'I' all linux/isdn.h
'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a9d3a1794b23..bca6f389da66 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -147,6 +147,9 @@ running once the system is up.
acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA
Format: <irq>,<irq>...
+ acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS
+ Format: To spoof as Windows 98: ="Microsoft Windows"
+
acpi_osi= [HW,ACPI] empty param disables _OSI
acpi_serialize [HW,ACPI] force serialization of AML methods
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index 76750fb9151a..839cbb71388b 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -39,10 +39,13 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
mknod /dev/net/tun c 10 200
Set permissions:
- e.g. chmod 0700 /dev/net/tun
- if you want the device only accessible by root. Giving regular users the
- right to assign network devices is NOT a good idea. Users could assign
- bogus network interfaces to trick firewalls or administrators.
+ e.g. chmod 0666 /dev/net/tun
+ There's no harm in allowing the device to be accessible by non-root users,
+ since CAP_NET_ADMIN is required for creating network devices or for
+ connecting to network devices which aren't owned by the user in question.
+ If you want to create persistent devices and give ownership of them to
+ unprivileged users, then you need the /dev/net/tun device to be usable by
+ those users.
Driver module autoloading
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index 516c5019013b..823b2cf6e3dc 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -350,9 +350,34 @@ Q: How do I make suspend more verbose?
A: If you want to see any non-error kernel messages on the virtual
terminal the kernel switches to during suspend, you have to set the
-kernel console loglevel to at least 5, for example by doing
-
- echo 5 > /proc/sys/kernel/printk
+kernel console loglevel to at least 4 (KERN_WARNING), for example by
+doing
+
+ # save the old loglevel
+ read LOGLEVEL DUMMY < /proc/sys/kernel/printk
+ # set the loglevel so we see the progress bar.
+ # if the level is higher than needed, we leave it alone.
+ if [ $LOGLEVEL -lt 5 ]; then
+ echo 5 > /proc/sys/kernel/printk
+ fi
+
+ IMG_SZ=0
+ read IMG_SZ < /sys/power/image_size
+ echo -n disk > /sys/power/state
+ RET=$?
+ #
+ # the logic here is:
+ # if image_size > 0 (without kernel support, IMG_SZ will be zero),
+ # then try again with image_size set to zero.
+ if [ $RET -ne 0 -a $IMG_SZ -ne 0 ]; then # try again with minimal image size
+ echo 0 > /sys/power/image_size
+ echo -n disk > /sys/power/state
+ RET=$?
+ fi
+
+ # restore previous loglevel
+ echo $LOGLEVEL > /proc/sys/kernel/printk
+ exit $RET
Q: Is this true that if I have a mounted filesystem on a USB device and
I suspend to disk, I can lose data unless the filesystem has been mounted
@@ -380,3 +405,17 @@ safest thing is to unmount all filesystems on removable media (such USB,
Firewire, CompactFlash, MMC, external SATA, or even IDE hotplug bays)
before suspending; then remount them after resuming.
+Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
+compiled with similar configuration files. I found that suspend to disk
+(and resume) is much slower on 2.6.16 than on 2.6.15. Any idea why that
+might happen, or how I can speed it up?
+
+A: This is because the suspend image is now larger than it was in
+2.6.15 (by saving more data we get a more responsive system after
+resume).
+
+There's the /sys/power/image_size knob that controls the size of the
+image. If you set it to 0 (e.g. by echo 0 > /sys/power/image_size as
+root), the 2.6.15 behavior should be restored. If it is still too
+slow, take a look at suspend.sf.net -- userland suspend is faster and
+supports LZF compression to speed it up further.
diff --git a/Documentation/power/video.txt b/Documentation/power/video.txt
index 43a889f8f08d..d859faa3a463 100644
--- a/Documentation/power/video.txt
+++ b/Documentation/power/video.txt
@@ -90,6 +90,7 @@ Table of known working notebooks:
Model hack (or "how to do it")
------------------------------------------------------------------------------
Acer Aspire 1406LC ole's late BIOS init (7), turn off DRI
+Acer TM 230 s3_bios (2)
Acer TM 242FX vbetool (6)
Acer TM C110 video_post (8)
Acer TM C300 vga=normal (only suspend on console, not in X), vbetool (6) or video_post (8)
@@ -115,6 +116,7 @@ Dell D610 vga=normal and X (possibly vbestate (6) too, but not tested)
Dell Inspiron 4000 ??? (*)
Dell Inspiron 500m ??? (*)
Dell Inspiron 510m ???
+Dell Inspiron 5150 vbetool needed (6)
Dell Inspiron 600m ??? (*)
Dell Inspiron 8200 ??? (*)
Dell Inspiron 8500 ??? (*)
@@ -125,6 +127,7 @@ HP NX7000 ??? (*)
HP Pavilion ZD7000 vbetool post needed, need open-source nv driver for X
HP Omnibook XE3 athlon version none (1)
HP Omnibook XE3GC none (1), video is S3 Savage/IX-MV
+HP Omnibook XE3L-GF vbetool (6)
HP Omnibook 5150 none (1), (S1 also works OK)
IBM TP T20, model 2647-44G none (1), video is S3 Inc. 86C270-294 Savage/IX-MV, vesafb gets "interesting" but X work.
IBM TP A31 / Type 2652-M5G s3_mode (3) [works ok with BIOS 1.04 2002-08-23, but not at all with BIOS 1.11 2004-11-05 :-(]
@@ -157,6 +160,7 @@ Sony Vaio vgn-s260 X or boot-radeon can init it (5)
Sony Vaio vgn-S580BH vga=normal, but suspend from X. Console will be blank unless you return to X.
Sony Vaio vgn-FS115B s3_bios (2),s3_mode (4)
Toshiba Libretto L5 none (1)
+Toshiba Libretto 100CT/110CT vbetool (6)
Toshiba Portege 3020CT s3_mode (3)
Toshiba Satellite 4030CDT s3_mode (3) (S1 also works OK)
Toshiba Satellite 4080XCDT s3_mode (3) (S1 also works OK)
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index 3f1c5464b1c9..5a311c38dd1a 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -1,5 +1,6 @@
Copyright 2004 Linus Torvalds
Copyright 2004 Pavel Machek <pavel@suse.cz>
+Copyright 2006 Bob Copeland <me@bobcopeland.com>
Using sparse for typechecking
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -41,15 +42,8 @@ sure that bitwise types don't get mixed up (little-endian vs big-endian
vs cpu-endian vs whatever), and there the constant "0" really _is_
special.
-Use
-
- make C=[12] CF=-Wbitwise
-
-or you don't get any checking at all.
-
-
-Where to get sparse
-~~~~~~~~~~~~~~~~~~~
+Getting sparse
+~~~~~~~~~~~~~~
With git, you can just get it from
@@ -57,7 +51,7 @@ With git, you can just get it from
and DaveJ has tar-balls at
- http://www.codemonkey.org.uk/projects/git-snapshots/sparse/
+ http://www.codemonkey.org.uk/projects/git-snapshots/sparse/
Once you have it, just do
@@ -65,8 +59,20 @@ Once you have it, just do
make
make install
-as your regular user, and it will install sparse in your ~/bin directory.
-After that, doing a kernel make with "make C=1" will run sparse on all the
-C files that get recompiled, or with "make C=2" will run sparse on the
-files whether they need to be recompiled or not (ie the latter is fast way
-to check the whole tree if you have already built it).
+as a regular user, and it will install sparse in your ~/bin directory.
+
+Using sparse
+~~~~~~~~~~~~
+
+Do a kernel make with "make C=1" to run sparse on all the C files that get
+recompiled, or use "make C=2" to run sparse on the files whether they need to
+be recompiled or not. The latter is a fast way to check the whole tree if you
+have already built it.
+
+The optional make variable CF can be used to pass arguments to sparse. The
+build system passes -Wbitwise to sparse automatically. To perform endianness
+checks, you may define __CHECK_ENDIAN__:
+
+ make C=2 CF="-D__CHECK_ENDIAN__"
+
+These checks are disabled by default as they generate a host of warnings.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index a46c10fcddfc..2dc246af4885 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -29,6 +29,7 @@ Currently, these files are in /proc/sys/vm:
- drop-caches
- zone_reclaim_mode
- zone_reclaim_interval
+- panic_on_oom
==============================================================
@@ -178,3 +179,15 @@ Time is set in seconds and set by default to 30 seconds.
Reduce the interval if undesired off node allocations occur. However, too
frequent scans will have a negative impact on off node allocation performance.
+=============================================================
+
+panic_on_oom
+
+This enables or disables the panic-on-out-of-memory feature. If this is set
+to 1, the kernel panics when out-of-memory happens. If this is set to 0, the
+kernel will kill a rogue process by invoking the oom_killer. Usually the
+oom_killer can kill a rogue process and the system will survive. If you want
+to panic the system rather than kill rogue processes, set this to 1.
+
+The default value is 0.
+
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index 0dd4ef30c361..99f89aa10169 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -26,8 +26,13 @@ a process are located. See also the numa_maps manpage in the numactl package.
Manual migration is useful if for example the scheduler has relocated
a process to a processor on a distant node. A batch scheduler or an
administrator may detect the situation and move the pages of the process
-nearer to the new processor. At some point in the future we may have
-some mechanism in the scheduler that will automatically move the pages.
+nearer to the new processor. The kernel itself only provides manual
+page migration support. Automatic page migration may be implemented
+through user space processes that move pages. A special function call
+"move_pages" allows the moving of individual pages within a process.
+A NUMA profiler may, for example, obtain a log showing frequent off-node
+accesses and use the result to move pages to more advantageous
+locations.
Larger installations usually partition the system using cpusets into
sections of nodes. Paul Jackson has equipped cpusets with the ability to
@@ -62,22 +67,14 @@ A. In kernel use of migrate_pages()
It also prevents the swapper or other scans to encounter
the page.
-2. Generate a list of newly allocates page. These pages will contain the
- contents of the pages from the first list after page migration is
- complete.
+2. We need to have a function of type new_page_t that can be
+ passed to migrate_pages(). This function should figure out
+ how to allocate the correct new page given the old page.
3. The migrate_pages() function is called which attempts
- to do the migration. It returns the moved pages in the
- list specified as the third parameter and the failed
- migrations in the fourth parameter. The first parameter
- will contain the pages that could still be retried.
-
-4. The leftover pages of various types are returned
- to the LRU using putback_to_lru_pages() or otherwise
- disposed of. The pages will still have the refcount as
- increased by isolate_lru_pages() if putback_to_lru_pages() is not
- used! The kernel may want to handle the various cases of failures in
- different ways.
+ to do the migration. It will call the function to allocate
+ the new page for each page that is considered for
+ moving.
B. How migrate_pages() works
----------------------------
@@ -93,83 +90,58 @@ Steps:
2. Insure that writeback is complete.
-3. Make sure that the page has assigned swap cache entry if
- it is an anonyous page. The swap cache reference is necessary
- to preserve the information contain in the page table maps while
- page migration occurs.
-
-4. Prep the new page that we want to move to. It is locked
+3. Prep the new page that we want to move to. It is locked
and set to not being uptodate so that all accesses to the new
page immediately lock while the move is in progress.
-5. All the page table references to the page are either dropped (file
- backed pages) or converted to swap references (anonymous pages).
- This should decrease the reference count.
+4. The new page is prepped with some settings from the old page so that
+ accesses to the new page will discover a page with the correct settings.
+
+5. All the page table references to the page are converted
+ to migration entries or dropped (nonlinear vmas).
+ This decreases the mapcount of the page. If the resulting
+ mapcount is not zero then we do not migrate the page.
+ All user space processes that attempt to access the page
+ will now wait on the page lock.
6. The radix tree lock is taken. This will cause all processes trying
- to reestablish a pte to block on the radix tree spinlock.
+ to access the page via the mapping to block on the radix tree spinlock.
7. The refcount of the page is examined and we back out if references remain
otherwise we know that we are the only one referencing this page.
8. The radix tree is checked and if it does not contain the pointer to this
- page then we back out because someone else modified the mapping first.
-
-9. The mapping is checked. If the mapping is gone then a truncate action may
- be in progress and we back out.
-
-10. The new page is prepped with some settings from the old page so that
- accesses to the new page will be discovered to have the correct settings.
+ page then we back out because someone else modified the radix tree.
-11. The radix tree is changed to point to the new page.
+9. The radix tree is changed to point to the new page.
-12. The reference count of the old page is dropped because the radix tree
- reference is gone.
+10. The reference count of the old page is dropped because the radix tree
+ reference is gone. A reference to the new page is established because
+ the new page is now referenced by the radix tree.
-13. The radix tree lock is dropped. With that lookups become possible again
- and other processes will move from spinning on the tree lock to sleeping on
- the locked new page.
+11. The radix tree lock is dropped. With that lookups in the mapping
+ become possible again. Processes will move from spinning on the tree_lock
+ to sleeping on the locked new page.
-14. The page contents are copied to the new page.
+12. The page contents are copied to the new page.
-15. The remaining page flags are copied to the new page.
+13. The remaining page flags are copied to the new page.
-16. The old page flags are cleared to indicate that the page does
- not use any information anymore.
+14. The old page flags are cleared to indicate that the page does
+ not provide any information anymore.
-17. Queued up writeback on the new page is triggered.
+15. Queued up writeback on the new page is triggered.
-18. If swap pte's were generated for the page then replace them with real
- ptes. This will reenable access for processes not blocked by the page lock.
+16. If migration entries were generated for the page then replace them
+ with real ptes. Doing so will enable access for user space processes
+ not already waiting for the page lock.
19. The page locks are dropped from the old and new page.
- Processes waiting on the page lock can continue.
+ Processes waiting on the page lock will redo their page faults
+ and will reach the new page.
20. The new page is moved to the LRU and can be scanned by the swapper
etc again.
-TODO list
----------
-
-- Page migration requires the use of swap handles to preserve the
- information of the anonymous page table entries. This means that swap
- space is reserved but never used. The maximum number of swap handles used
- is determined by CHUNK_SIZE (see mm/mempolicy.c) per ongoing migration.
- Reservation of pages could be avoided by having a special type of swap
- handle that does not require swap space and that would only track the page
- references. Something like that was proposed by Marcelo Tosatti in the
- past (search for migration cache on lkml or linux-mm@kvack.org).
-
-- Page migration unmaps ptes for file backed pages and requires page
- faults to reestablish these ptes. This could be optimized by somehow
- recording the references before migration and then reestablish them later.
- However, there are several locking challenges that have to be overcome
- before this is possible.
-
-- Page migration generates read ptes for anonymous pages. Dirty page
- faults are required to make the pages writable again. It may be possible
- to generate a pte marked dirty if it is known that the page is dirty and
- that this process has the only reference to that page.
-
-Christoph Lameter, March 8, 2006.
+Christoph Lameter, May 8, 2006.
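
A minimal sketch of the in-kernel usage described in section A above,
assuming the 2.6.18-era new_page_t/migrate_pages() prototypes (the allocator
name and the node argument are illustrative; check include/linux/migrate.h
for the exact signature in your tree):

    #include <linux/migrate.h>
    #include <linux/gfp.h>

    /*
     * Illustrative allocator passed to migrate_pages(): given an old page
     * and an opaque private value (here, a target node), return the newly
     * allocated replacement page.
     */
    static struct page *new_node_page(struct page *page, unsigned long node,
                                      int **result)
    {
            return alloc_pages_node((int)node, GFP_HIGHUSER, 0);
    }

    /*
     * Later, with a list of pages isolated from the LRU (step A.1):
     *
     *      err = migrate_pages(&pagelist, new_node_page, target_node);
     */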
diff --git a/MAINTAINERS b/MAINTAINERS
index f4cf0148c901..4dcd2f1f14d6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -574,6 +574,12 @@ L: linuxppc-dev@ozlabs.org
W: http://www.penguinppc.org/ppc64/
S: Supported
+BROADCOM B44 10/100 ETHERNET DRIVER
+P: Gary Zambrano
+M: zambrano@broadcom.com
+L: netdev@vger.kernel.org
+S: Supported
+
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
P: Michael Chan
M: mchan@broadcom.com
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 2b245ad731ee..d3848c5b0d2b 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -53,10 +53,6 @@ extern void __divqu (void);
extern void __remqu (void);
EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(disable_irq_nosync);
-EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(screen_info);
EXPORT_SYMBOL(perf_irq);
EXPORT_SYMBOL(callback_getenv);
@@ -68,19 +64,13 @@ EXPORT_SYMBOL(alpha_using_srm);
/* platform dependent support */
EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memsetw);
@@ -122,11 +112,9 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* In-kernel system calls. */
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(sys_dup);
EXPORT_SYMBOL(sys_exit);
EXPORT_SYMBOL(sys_write);
-EXPORT_SYMBOL(sys_read);
EXPORT_SYMBOL(sys_lseek);
EXPORT_SYMBOL(execve);
EXPORT_SYMBOL(sys_setsid);
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 31afe3d91ac6..e15dcf4f3dcd 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -244,7 +244,7 @@ do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer,
unsigned long bufsiz)
{
struct kstatfs linux_stat;
- int error = vfs_statfs(dentry->d_inode->i_sb, &linux_stat);
+ int error = vfs_statfs(dentry, &linux_stat);
if (!error)
error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
return error;
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 2e45e8604e32..741da0945dc4 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -375,7 +375,7 @@ give_sigsegv:
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -32ul);
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 847e3e6356c6..e1289a256ce5 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARPSL_PM) += sharpsl_pm.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_ARCH_IXP2000) += uengine.o
+obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 7971d0dc6892..5b7c26395b44 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -77,6 +77,8 @@ struct dmabounce_device_info {
#endif
struct dmabounce_pool small;
struct dmabounce_pool large;
+
+ rwlock_t lock;
};
static LIST_HEAD(dmabounce_devs);
@@ -116,6 +118,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
struct safe_buffer *buf;
struct dmabounce_pool *pool;
struct device *dev = device_info->dev;
+ unsigned long flags;
dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
__func__, ptr, size, dir);
@@ -163,8 +166,12 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
print_alloc_stats(device_info);
#endif
+ write_lock_irqsave(&device_info->lock, flags);
+
list_add(&buf->node, &device_info->safe_buffers);
+ write_unlock_irqrestore(&device_info->lock, flags);
+
return buf;
}
@@ -172,22 +179,32 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
- struct safe_buffer *b;
+ struct safe_buffer *b = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
if (b->safe_dma_addr == safe_dma_addr)
- return b;
+ break;
- return NULL;
+ read_unlock_irqrestore(&device_info->lock, flags);
+ return b;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
+ unsigned long flags;
+
dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
+ write_lock_irqsave(&device_info->lock, flags);
+
list_del(&buf->node);
+ write_unlock_irqrestore(&device_info->lock, flags);
+
if (buf->pool)
dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
else
@@ -396,7 +413,6 @@ dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
- unsigned long flags;
dma_addr_t dma_addr;
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -404,12 +420,8 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
BUG_ON(dir == DMA_NONE);
- local_irq_save(flags);
-
dma_addr = map_single(dev, ptr, size, dir);
- local_irq_restore(flags);
-
return dma_addr;
}
@@ -424,25 +436,18 @@ void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- unsigned long flags;
-
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, dir);
BUG_ON(dir == DMA_NONE);
- local_irq_save(flags);
-
unmap_single(dev, dma_addr, size, dir);
-
- local_irq_restore(flags);
}
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
- unsigned long flags;
int i;
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -450,8 +455,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
BUG_ON(dir == DMA_NONE);
- local_irq_save(flags);
-
for (i = 0; i < nents; i++, sg++) {
struct page *page = sg->page;
unsigned int offset = sg->offset;
@@ -462,8 +465,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
map_single(dev, ptr, length, dir);
}
- local_irq_restore(flags);
-
return nents;
}
@@ -471,7 +472,6 @@ void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
- unsigned long flags;
int i;
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -479,55 +479,38 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
BUG_ON(dir == DMA_NONE);
- local_irq_save(flags);
-
for (i = 0; i < nents; i++, sg++) {
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
unmap_single(dev, dma_addr, length, dir);
}
-
- local_irq_restore(flags);
}
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- unsigned long flags;
-
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, dir);
- local_irq_save(flags);
-
sync_single(dev, dma_addr, size, dir);
-
- local_irq_restore(flags);
}
void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- unsigned long flags;
-
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, dir);
- local_irq_save(flags);
-
sync_single(dev, dma_addr, size, dir);
-
- local_irq_restore(flags);
}
void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
- unsigned long flags;
int i;
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -535,23 +518,18 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
BUG_ON(dir == DMA_NONE);
- local_irq_save(flags);
-
for (i = 0; i < nents; i++, sg++) {
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
sync_single(dev, dma_addr, length, dir);
}
-
- local_irq_restore(flags);
}
void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
- unsigned long flags;
int i;
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -559,16 +537,12 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
BUG_ON(dir == DMA_NONE);
- local_irq_save(flags);
-
for (i = 0; i < nents; i++, sg++) {
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
sync_single(dev, dma_addr, length, dir);
}
-
- local_irq_restore(flags);
}
static int
@@ -622,6 +596,7 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
device_info->dev = dev;
INIT_LIST_HEAD(&device_info->safe_buffers);
+ rwlock_init(&device_info->lock);
#ifdef STATS
device_info->total_allocs = 0;
diff --git a/arch/arm/common/uengine.c b/arch/arm/common/uengine.c
index a1310b71004e..dfca596a9a27 100644
--- a/arch/arm/common/uengine.c
+++ b/arch/arm/common/uengine.c
@@ -18,10 +18,26 @@
#include <linux/module.h>
#include <linux/string.h>
#include <asm/hardware.h>
-#include <asm/arch/ixp2000-regs.h>
+#include <asm/arch/hardware.h>
#include <asm/hardware/uengine.h>
#include <asm/io.h>
+#if defined(CONFIG_ARCH_IXP2000)
+#define IXP_UENGINE_CSR_VIRT_BASE IXP2000_UENGINE_CSR_VIRT_BASE
+#define IXP_PRODUCT_ID IXP2000_PRODUCT_ID
+#define IXP_MISC_CONTROL IXP2000_MISC_CONTROL
+#define IXP_RESET1 IXP2000_RESET1
+#else
+#if defined(CONFIG_ARCH_IXP23XX)
+#define IXP_UENGINE_CSR_VIRT_BASE IXP23XX_UENGINE_CSR_VIRT_BASE
+#define IXP_PRODUCT_ID IXP23XX_PRODUCT_ID
+#define IXP_MISC_CONTROL IXP23XX_MISC_CONTROL
+#define IXP_RESET1 IXP23XX_RESET1
+#else
+#error unknown platform
+#endif
+#endif
+
#define USTORE_ADDRESS 0x000
#define USTORE_DATA_LOWER 0x004
#define USTORE_DATA_UPPER 0x008
@@ -43,7 +59,7 @@ u32 ixp2000_uengine_mask;
static void *ixp2000_uengine_csr_area(int uengine)
{
- return ((void *)IXP2000_UENGINE_CSR_VIRT_BASE) + (uengine << 10);
+ return ((void *)IXP_UENGINE_CSR_VIRT_BASE) + (uengine << 10);
}
/*
@@ -91,8 +107,13 @@ EXPORT_SYMBOL(ixp2000_uengine_csr_write);
void ixp2000_uengine_reset(u32 uengine_mask)
{
- ixp2000_reg_wrb(IXP2000_RESET1, uengine_mask & ixp2000_uengine_mask);
- ixp2000_reg_wrb(IXP2000_RESET1, 0);
+ u32 value;
+
+ value = ixp2000_reg_read(IXP_RESET1) & ~ixp2000_uengine_mask;
+
+ uengine_mask &= ixp2000_uengine_mask;
+ ixp2000_reg_wrb(IXP_RESET1, value | uengine_mask);
+ ixp2000_reg_wrb(IXP_RESET1, value);
}
EXPORT_SYMBOL(ixp2000_uengine_reset);
@@ -235,11 +256,12 @@ static int check_ixp_type(struct ixp2000_uengine_code *c)
u32 product_id;
u32 rev;
- product_id = ixp2000_reg_read(IXP2000_PRODUCT_ID);
+ product_id = ixp2000_reg_read(IXP_PRODUCT_ID);
if (((product_id >> 16) & 0x1f) != 0)
return 0;
switch ((product_id >> 8) & 0xff) {
+#ifdef CONFIG_ARCH_IXP2000
case 0: /* IXP2800 */
if (!(c->cpu_model_bitmask & 4))
return 0;
@@ -254,6 +276,14 @@ static int check_ixp_type(struct ixp2000_uengine_code *c)
if (!(c->cpu_model_bitmask & 2))
return 0;
break;
+#endif
+
+#ifdef CONFIG_ARCH_IXP23XX
+ case 4: /* IXP23xx */
+ if (!(c->cpu_model_bitmask & 0x3f0))
+ return 0;
+ break;
+#endif
default:
return 0;
@@ -432,7 +462,8 @@ static int __init ixp2000_uengine_init(void)
/*
* Determine number of microengines present.
*/
- switch ((ixp2000_reg_read(IXP2000_PRODUCT_ID) >> 8) & 0x1fff) {
+ switch ((ixp2000_reg_read(IXP_PRODUCT_ID) >> 8) & 0x1fff) {
+#ifdef CONFIG_ARCH_IXP2000
case 0: /* IXP2800 */
case 1: /* IXP2850 */
ixp2000_uengine_mask = 0x00ff00ff;
@@ -441,10 +472,17 @@ static int __init ixp2000_uengine_init(void)
case 2: /* IXP2400 */
ixp2000_uengine_mask = 0x000f000f;
break;
+#endif
+
+#ifdef CONFIG_ARCH_IXP23XX
+ case 4: /* IXP23xx */
+ ixp2000_uengine_mask = (*IXP23XX_EXP_CFG_FUSE >> 8) & 0xf;
+ break;
+#endif
default:
printk(KERN_INFO "Detected unknown IXP2000 model (%.8x)\n",
- (unsigned int)ixp2000_reg_read(IXP2000_PRODUCT_ID));
+ (unsigned int)ixp2000_reg_read(IXP_PRODUCT_ID));
ixp2000_uengine_mask = 0x00000000;
break;
}
@@ -457,15 +495,15 @@ static int __init ixp2000_uengine_init(void)
/*
* Synchronise timestamp counters across all microengines.
*/
- value = ixp2000_reg_read(IXP2000_MISC_CONTROL);
- ixp2000_reg_wrb(IXP2000_MISC_CONTROL, value & ~0x80);
+ value = ixp2000_reg_read(IXP_MISC_CONTROL);
+ ixp2000_reg_wrb(IXP_MISC_CONTROL, value & ~0x80);
for (uengine = 0; uengine < 32; uengine++) {
if (ixp2000_uengine_mask & (1 << uengine)) {
ixp2000_uengine_csr_write(uengine, TIMESTAMP_LOW, 0);
ixp2000_uengine_csr_write(uengine, TIMESTAMP_HIGH, 0);
}
}
- ixp2000_reg_wrb(IXP2000_MISC_CONTROL, value | 0x80);
+ ixp2000_reg_wrb(IXP_MISC_CONTROL, value | 0x80);
return 0;
}
diff --git a/arch/arm/configs/lpd270_defconfig b/arch/arm/configs/lpd270_defconfig
new file mode 100644
index 000000000000..d08bbe59483a
--- /dev/null
+++ b/arch/arm/configs/lpd270_defconfig
@@ -0,0 +1,963 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.17-git2
+# Wed Jun 21 22:20:18 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MTD_XIP=y
+CONFIG_VECTORS_BASE=0xffff0000
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_UID16=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODULE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+
+#
+# Block layer
+#
+# CONFIG_BLK_DEV_IO_TRACE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91RM9200 is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PNX4008 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+
+#
+# Intel PXA2xx Implementations
+#
+# CONFIG_ARCH_LUBBOCK is not set
+CONFIG_MACH_LOGICPD_PXA270=y
+# CONFIG_MACH_MAINSTONE is not set
+# CONFIG_ARCH_PXA_IDP is not set
+# CONFIG_PXA_SHARPSL is not set
+CONFIG_PXA27x=y
+CONFIG_IWMMXT=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSCALE=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+CONFIG_XSCALE_PMU=y
+
+#
+# Bus support
+#
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/nfs ip=bootp console=ttyS0,115200 mem=64M"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+# CONFIG_APM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+# CONFIG_MTD_CFI_I1 is not set
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_SHARP_SL is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_IDE_ARM is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SMC91X=y
+# CONFIG_DM9000 is not set
+# CONFIG_SMC911X is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_PXA=y
+CONFIG_SERIAL_PXA_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_MACMODES is not set
+CONFIG_FB_FIRMWARE_EDID=y
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_PXA=y
+# CONFIG_FB_PXA_PARAMETERS is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+CONFIG_SOUND=y
+
+#
+# Advanced Linux Sound Architecture
+#
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+
+#
+# Generic devices
+#
+CONFIG_SND_AC97_CODEC=y
+CONFIG_SND_AC97_BUS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+
+#
+# ALSA ARM devices
+#
+CONFIG_SND_PXA2XX_PCM=y
+CONFIG_SND_PXA2XX_AC97=y
+
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_VFAT_FS is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_UNWIND_INFO is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+CONFIG_DEBUG_ERRORS=y
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index ab8e600c18c8..86c92523a346 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -20,6 +20,7 @@
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
+#include <asm/thread_notify.h>
#include "entry-header.S"
@@ -560,10 +561,8 @@ ENTRY(__switch_to)
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
-#ifndef CONFIG_MMU
- add r2, r2, #TI_CPU_DOMAIN
-#else
- ldr r6, [r2, #TI_CPU_DOMAIN]!
+#ifdef CONFIG_MMU
+ ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
@@ -585,21 +584,20 @@ ENTRY(__switch_to)
#ifdef CONFIG_MMU
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
-#ifdef CONFIG_VFP
- @ Always disable VFP so we can lazily save/restore the old
- @ state. This occurs in the context of the previous thread.
- VFPFMRX r4, FPEXC
- bic r4, r4, #FPEXC_ENABLE
- VFPFMXR FPEXC, r4
-#endif
#if defined(CONFIG_IWMMXT)
bl iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
- add r4, r2, #40 @ cpu_context_save->extra
+ add r4, r2, #TI_CPU_DOMAIN + 40 @ cpu_context_save->extra
ldmib r4, {r4, r5}
mar acc0, r4, r5
#endif
- ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ mov r5, r0
+ add r4, r2, #TI_CPU_SAVE
+ ldr r0, =thread_notify_head
+ mov r1, #THREAD_NOTIFY_SWITCH
+ bl atomic_notifier_call_chain
+ mov r0, r5
+ ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
__INIT
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index bcc19fbb32df..ec20f8935e8b 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -52,7 +52,7 @@
*/
#define MAX_IRQ_CNT 100000
-static int noirqdebug;
+static int noirqdebug __read_mostly;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);
@@ -81,7 +81,7 @@ irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
- irq_err_count += 1;
+ irq_err_count++;
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 24c7b0477a09..af9e0ae952d5 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -285,7 +285,7 @@ ENTRY(iwmmxt_task_switch)
bne 1f @ yes: block them for next task
ldr r5, =concan_owner
- add r6, r2, #(TI_IWMMXT_STATE - TI_CPU_DOMAIN) @ get next task Concan save area
+ add r6, r2, #TI_IWMMXT_STATE @ get next task Concan save area
ldr r5, [r5] @ get current Concan owner
teq r5, r6 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 17c38dbf2f3c..e1c77ee885a7 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -33,6 +33,7 @@
#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/thread_notify.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>
@@ -338,13 +339,9 @@ void exit_thread(void)
{
}
-static void default_fp_init(union fp_state *fp)
-{
- memset(fp, 0, sizeof(union fp_state));
-}
+ATOMIC_NOTIFIER_HEAD(thread_notify_head);
-void (*fp_init)(union fp_state *) = default_fp_init;
-EXPORT_SYMBOL(fp_init);
+EXPORT_SYMBOL_GPL(thread_notify_head);
void flush_thread(void)
{
@@ -353,22 +350,21 @@ void flush_thread(void)
memset(thread->used_cp, 0, sizeof(thread->used_cp));
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+ memset(&thread->fpstate, 0, sizeof(union fp_state));
+
+ thread_notify(THREAD_NOTIFY_FLUSH, thread);
#if defined(CONFIG_IWMMXT)
iwmmxt_task_release(thread);
#endif
- fp_init(&thread->fpstate);
-#if defined(CONFIG_VFP)
- vfp_flush_thread(&thread->vfpstate);
-#endif
}
void release_thread(struct task_struct *dead_task)
{
-#if defined(CONFIG_VFP)
- vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
-#endif
+ struct thread_info *thread = task_thread_info(dead_task);
+
+ thread_notify(THREAD_NOTIFY_RELEASE, thread);
#if defined(CONFIG_IWMMXT)
- iwmmxt_task_release(task_thread_info(dead_task));
+ iwmmxt_task_release(thread);
#endif
}
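
The process.c change above, together with the vfpmodule.c and nwfpe hunks later in this series, replaces the hard-wired fp_init/vfp_flush_thread hooks with a generic atomic notifier chain (thread_notify_head). A minimal consumer sketch, modelled on those registrations — the example_* names are placeholders, not part of the patch — might look like this:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/string.h>
#include <asm/thread_info.h>
#include <asm/thread_notify.h>

/* Sketch only: clears per-thread FPA state on flush, much as
 * flush_thread() now does via the chain. */
static int example_thread_notify(struct notifier_block *self,
				 unsigned long cmd, void *v)
{
	struct thread_info *thread = v;

	if (cmd == THREAD_NOTIFY_FLUSH)
		memset(&thread->fpstate, 0, sizeof(thread->fpstate));

	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call	= example_thread_notify,
};

static int __init example_init(void)
{
	/* thread_register_notifier() is assumed to wrap
	 * atomic_notifier_chain_register(&thread_notify_head, ...) */
	thread_register_notifier(&example_notifier);
	return 0;
}
core_initcall(example_init);
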
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a0cd0a90a10d..f094277485c8 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -665,17 +665,33 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
if (syscall) {
if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
if (thumb_mode(regs)) {
- regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
regs->ARM_pc -= 2;
} else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+ regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_pc -= 4;
+#else
u32 __user *usp;
+ u32 swival = __NR_restart_syscall;
regs->ARM_sp -= 12;
usp = (u32 __user *)regs->ARM_sp;
+ /*
+			 * Either we support OABI only, or we have
+			 * EABI with the OABI compat layer enabled.
+			 * In the latter case we don't know if user
+ * space is EABI or not, and if not we must
+ * not clobber r7. Always using the OABI
+ * syscall solves that issue and works for
+ * all those cases.
+ */
+ swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;
+
put_user(regs->ARM_pc, &usp[0]);
/* swi __NR_restart_syscall */
- put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
+ put_user(0xef000000 | swival, &usp[1]);
/* ldr pc, [sp], #12 */
put_user(0xe49df00c, &usp[2]);
@@ -683,6 +699,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
(unsigned long)(usp + 3));
regs->ARM_pc = regs->ARM_sp + 4;
+#endif
}
}
if (regs->ARM_r0 == -ERESTARTNOHAND ||
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index dcd417625389..bf6bd71bdd08 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -103,7 +103,8 @@ static int ep93xx_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
write_seqlock(&xtime_lock);
__raw_writel(1, EP93XX_TIMER1_CLEAR);
- while (__raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time
+ while ((signed long)
+ (__raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time)
>= TIMER4_TICKS_PER_JIFFY) {
last_jiffy_time += TIMER4_TICKS_PER_JIFFY;
timer_tick(regs);
@@ -124,7 +125,7 @@ static void __init ep93xx_timer_init(void)
{
/* Enable periodic HZ timer. */
__raw_writel(0x48, EP93XX_TIMER1_CONTROL);
- __raw_writel((508000 / HZ) - 1, EP93XX_TIMER1_LOAD);
+ __raw_writel((508469 / HZ) - 1, EP93XX_TIMER1_LOAD);
__raw_writel(0xc8, EP93XX_TIMER1_CONTROL);
/* Enable lost jiffy timer. */
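
The (signed long) cast added to the tick loop above recurs in the ixp2000, ixp23xx, ixp4xx and OMAP 32k hunks further down; it makes the comparison safe across counter wrap-around. A stand-alone user-space illustration of the idiom, with made-up values and fixed-width types in place of the kernel's unsigned long, might be:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t counter = 10;           /* free-running count, just wrapped  */
	uint32_t last    = 0xfffffff0u;  /* reference taken before the wrap   */
	uint32_t ticks   = 16;           /* ticks per jiffy                   */

	/* counter - last wraps modulo 2^32 (here 10 - 0xfffffff0 == 26);
	 * reading the difference as signed keeps ">= ticks" true exactly
	 * when a full tick interval has really elapsed. */
	while ((int32_t)(counter - last) >= (int32_t)ticks) {
		last += ticks;
		printf("tick, last advanced to %#x\n", (unsigned)last);
	}
	return 0;
}
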
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index d18fcb1a2f1b..47cc6c8b7c79 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -16,16 +16,38 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+static struct physmap_flash_data gesbc9312_flash_data = {
+ .width = 4,
+};
+
+static struct resource gesbc9312_flash_resource = {
+ .start = 0x60000000,
+ .end = 0x60800000,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device gesbc9312_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &gesbc9312_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &gesbc9312_flash_resource,
+};
+
static void __init gesbc9312_init_machine(void)
{
ep93xx_init_devices();
- physmap_configure(0x60000000, 0x00800000, 4, NULL);
+ platform_device_register(&gesbc9312_flash);
}
MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index e24566b88a78..6e5a56cd5ae8 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <linux/m48t86.h>
@@ -111,6 +112,26 @@ static void __init ts72xx_map_io(void)
}
}
+static struct physmap_flash_data ts72xx_flash_data = {
+ .width = 1,
+};
+
+static struct resource ts72xx_flash_resource = {
+ .start = TS72XX_NOR_PHYS_BASE,
+ .end = TS72XX_NOR_PHYS_BASE + 0x01000000,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device ts72xx_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &ts72xx_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &ts72xx_flash_resource,
+};
+
static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
@@ -141,7 +162,7 @@ static void __init ts72xx_init_machine(void)
{
ep93xx_init_devices();
if (board_is_ts7200())
- physmap_configure(TS72XX_NOR_PHYS_BASE, 0x01000000, 1, NULL);
+ platform_device_register(&ts72xx_flash);
platform_device_register(&ts72xx_rtc_device);
}
diff --git a/arch/arm/mach-imx/dma.c b/arch/arm/mach-imx/dma.c
index 4ca51dcf13ac..36578871ecc8 100644
--- a/arch/arm/mach-imx/dma.c
+++ b/arch/arm/mach-imx/dma.c
@@ -15,6 +15,9 @@
* Changed to support scatter gather DMA
* by taking Russell's code from RiscPC
*
+ * 2006-05-31 Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ * Corrected error handling code.
+ *
*/
#undef DEBUG
@@ -277,7 +280,7 @@ imx_dma_setup_sg(imx_dmach_t dma_ch,
int
imx_dma_setup_handlers(imx_dmach_t dma_ch,
void (*irq_handler) (int, void *, struct pt_regs *),
- void (*err_handler) (int, void *, struct pt_regs *),
+ void (*err_handler) (int, void *, struct pt_regs *, int),
void *data)
{
struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
@@ -463,43 +466,53 @@ static irqreturn_t dma_err_handler(int irq, void *dev_id, struct pt_regs *regs)
int i, disr = DISR;
struct imx_dma_channel *channel;
unsigned int err_mask = DBTOSR | DRTOSR | DSESR | DBOSR;
+ int errcode;
- DISR = disr;
+ DISR = disr & err_mask;
for (i = 0; i < IMX_DMA_CHANNELS; i++) {
- channel = &imx_dma_channels[i];
-
- if ((err_mask & 1 << i) && channel->name
- && channel->err_handler) {
- channel->err_handler(i, channel->data, regs);
+ if(!(err_mask & (1 << i)))
continue;
- }
-
- imx_dma_channels[i].sg = NULL;
+ channel = &imx_dma_channels[i];
+ errcode = 0;
if (DBTOSR & (1 << i)) {
- printk(KERN_WARNING
- "Burst timeout on channel %d (%s)\n",
- i, channel->name);
- DBTOSR |= (1 << i);
+ DBTOSR = (1 << i);
+ errcode |= IMX_DMA_ERR_BURST;
}
if (DRTOSR & (1 << i)) {
- printk(KERN_WARNING
- "Request timeout on channel %d (%s)\n",
- i, channel->name);
- DRTOSR |= (1 << i);
+ DRTOSR = (1 << i);
+ errcode |= IMX_DMA_ERR_REQUEST;
}
if (DSESR & (1 << i)) {
- printk(KERN_WARNING
- "Transfer timeout on channel %d (%s)\n",
- i, channel->name);
- DSESR |= (1 << i);
+ DSESR = (1 << i);
+ errcode |= IMX_DMA_ERR_TRANSFER;
}
if (DBOSR & (1 << i)) {
- printk(KERN_WARNING
- "Buffer overflow timeout on channel %d (%s)\n",
- i, channel->name);
- DBOSR |= (1 << i);
+ DBOSR = (1 << i);
+ errcode |= IMX_DMA_ERR_BUFFER;
}
+
+ /*
+		 * Clearing the @sg field here would be questionable,
+		 * because its value can help to compute the
+		 * remaining/transferred byte count in the handler
+ */
+ /*imx_dma_channels[i].sg = NULL;*/
+
+ if (channel->name && channel->err_handler) {
+ channel->err_handler(i, channel->data, regs, errcode);
+ continue;
+ }
+
+ imx_dma_channels[i].sg = NULL;
+
+ printk(KERN_WARNING
+ "DMA timeout on channel %d (%s) -%s%s%s%s\n",
+ i, channel->name,
+ errcode&IMX_DMA_ERR_BURST? " burst":"",
+ errcode&IMX_DMA_ERR_REQUEST? " request":"",
+ errcode&IMX_DMA_ERR_TRANSFER? " transfer":"",
+ errcode&IMX_DMA_ERR_BUFFER? " buffer":"");
}
return IRQ_HANDLED;
}
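
With the imx_dma_setup_handlers() change above, error callbacks now receive a fourth errcode argument built from the IMX_DMA_ERR_* bits decoded in dma_err_handler(). A rough client-side sketch using the new signature (the example_* names and the header location are assumptions for illustration) could be:

#include <linux/kernel.h>
#include <asm/ptrace.h>
#include <asm/arch/imx-dma.h>	/* assumed home of IMX_DMA_ERR_* and prototypes */

static void example_dma_err(int channel, void *data,
			    struct pt_regs *regs, int errcode)
{
	if (errcode & IMX_DMA_ERR_BURST)
		printk(KERN_ERR "dma%d: burst timeout\n", channel);
	if (errcode & IMX_DMA_ERR_REQUEST)
		printk(KERN_ERR "dma%d: request timeout\n", channel);
	if (errcode & IMX_DMA_ERR_TRANSFER)
		printk(KERN_ERR "dma%d: transfer error\n", channel);
	if (errcode & IMX_DMA_ERR_BUFFER)
		printk(KERN_ERR "dma%d: buffer overflow\n", channel);
}

/* registered together with the completion handler, e.g.:
 *	imx_dma_setup_handlers(dma_ch, example_dma_done,
 *			       example_dma_err, dev_data);
 * where example_dma_done and dev_data are likewise placeholders. */
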
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index 6e8d504aca55..186f632035b8 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -211,7 +211,8 @@ static int ixp2000_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* clear timer 1 */
ixp2000_reg_wrb(IXP2000_T1_CLR, 1);
- while ((next_jiffy_time - *missing_jiffy_timer_csr) > ticks_per_jiffy) {
+ while ((signed long)(next_jiffy_time - *missing_jiffy_timer_csr)
+ >= ticks_per_jiffy) {
timer_tick(regs);
next_jiffy_time -= ticks_per_jiffy;
}
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index affd1d5d7440..051e3d70026e 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -334,7 +334,7 @@ void __init ixp23xx_init_irq(void)
/*************************************************************************
* Timer-tick functions for IXP23xx
*************************************************************************/
-#define CLOCK_TICKS_PER_USEC CLOCK_TICK_RATE / (USEC_PER_SEC)
+#define CLOCK_TICKS_PER_USEC (CLOCK_TICK_RATE / USEC_PER_SEC)
static unsigned long next_jiffy_time;
@@ -353,7 +353,7 @@ ixp23xx_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
/* Clear Pending Interrupt by writing '1' to it */
*IXP23XX_TIMER_STATUS = IXP23XX_TIMER1_INT_PEND;
- while ((*IXP23XX_TIMER_CONT - next_jiffy_time) > LATCH) {
+ while ((signed long)(*IXP23XX_TIMER_CONT - next_jiffy_time) >= LATCH) {
timer_tick(regs);
next_jiffy_time += LATCH;
}
@@ -439,5 +439,6 @@ static struct platform_device *ixp23xx_devices[] __initdata = {
void __init ixp23xx_sys_init(void)
{
+ *IXP23XX_EXP_UNIT_FUSE |= 0xf;
platform_add_devices(ixp23xx_devices, ARRAY_SIZE(ixp23xx_devices));
}
diff --git a/arch/arm/mach-ixp23xx/espresso.c b/arch/arm/mach-ixp23xx/espresso.c
index bf688c128630..dc5e489c70bc 100644
--- a/arch/arm/mach-ixp23xx/espresso.c
+++ b/arch/arm/mach-ixp23xx/espresso.c
@@ -53,9 +53,29 @@ static int __init espresso_pci_init(void)
};
subsys_initcall(espresso_pci_init);
+static struct physmap_flash_data espresso_flash_data = {
+ .width = 2,
+};
+
+static struct resource espresso_flash_resource = {
+ .start = 0x90000000,
+ .end = 0x92000000,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device espresso_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &espresso_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &espresso_flash_resource,
+};
+
static void __init espresso_init(void)
{
- physmap_configure(0x90000000, 0x02000000, 2, NULL);
+ platform_device_register(&espresso_flash);
/*
* Mark flash as writeable.
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c
index 00146c35daac..535b334ee045 100644
--- a/arch/arm/mach-ixp23xx/ixdp2351.c
+++ b/arch/arm/mach-ixp23xx/ixdp2351.c
@@ -298,9 +298,29 @@ static void __init ixdp2351_map_io(void)
iotable_init(ixdp2351_io_desc, ARRAY_SIZE(ixdp2351_io_desc));
}
+static struct physmap_flash_data ixdp2351_flash_data = {
+ .width = 1,
+};
+
+static struct resource ixdp2351_flash_resource = {
+ .start = 0x90000000,
+ .end = 0x94000000,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device ixdp2351_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &ixdp2351_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &ixdp2351_flash_resource,
+};
+
static void __init ixdp2351_init(void)
{
- physmap_configure(0x90000000, 0x04000000, 1, NULL);
+ platform_device_register(&ixdp2351_flash);
/*
* Mark flash as writeable
diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c
index 43c14e740794..b9f5d13fcfe1 100644
--- a/arch/arm/mach-ixp23xx/roadrunner.c
+++ b/arch/arm/mach-ixp23xx/roadrunner.c
@@ -137,9 +137,29 @@ static int __init roadrunner_pci_init(void)
subsys_initcall(roadrunner_pci_init);
+static struct physmap_flash_data roadrunner_flash_data = {
+ .width = 2,
+};
+
+static struct resource roadrunner_flash_resource = {
+ .start = 0x90000000,
+ .end = 0x94000000,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device roadrunner_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &roadrunner_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &roadrunner_flash_resource,
+};
+
static void __init roadrunner_init(void)
{
- physmap_configure(0x90000000, 0x04000000, 2, NULL);
+ platform_device_register(&roadrunner_flash);
/*
* Mark flash as writeable
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 00b761ff0f9c..bf25a76e9bdf 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -276,7 +276,7 @@ static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id, struct pt_regs
/*
* Catch up with the real idea of time
*/
- while ((*IXP4XX_OSTS - last_jiffy_time) > LATCH) {
+ while ((signed long)(*IXP4XX_OSTS - last_jiffy_time) >= LATCH) {
timer_tick(regs);
last_jiffy_time += LATCH;
}
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index a3b4c6ac5708..9a31444d9214 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
+#include <linux/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -37,6 +38,36 @@ static struct platform_device nas100d_flash = {
.resource = &nas100d_flash_resource,
};
+#ifdef CONFIG_LEDS_IXP4XX
+static struct resource nas100d_led_resources[] = {
+ {
+ .name = "wlan", /* green led */
+ .start = 0,
+ .end = 0,
+ .flags = IXP4XX_GPIO_LOW,
+ },
+ {
+ .name = "ready", /* blue power led (off is flashing!) */
+ .start = 15,
+ .end = 15,
+ .flags = IXP4XX_GPIO_LOW,
+ },
+ {
+ .name = "disk", /* yellow led */
+ .start = 3,
+ .end = 3,
+ .flags = IXP4XX_GPIO_LOW,
+ },
+};
+
+static struct platform_device nas100d_leds = {
+ .name = "IXP4XX-GPIO-LED",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(nas100d_led_resources),
+ .resource = nas100d_led_resources,
+};
+#endif
+
static struct ixp4xx_i2c_pins nas100d_i2c_gpio_pins = {
.sda_pin = NAS100D_SDA_PIN,
.scl_pin = NAS100D_SCL_PIN,
@@ -95,7 +126,9 @@ static struct platform_device nas100d_uart = {
static struct platform_device *nas100d_devices[] __initdata = {
&nas100d_i2c_controller,
&nas100d_flash,
- &nas100d_uart,
+#ifdef CONFIG_LEDS_IXP4XX
+ &nas100d_leds,
+#endif
};
static void nas100d_power_off(void)
@@ -122,6 +155,12 @@ static void __init nas100d_init(void)
pm_power_off = nas100d_power_off;
+ /* This is only useful on a modified machine, but it is valuable
+ * to have it first in order to see debug messages, and so that
+ * it does *not* get removed if platform_add_devices fails!
+ */
+ (void)platform_device_register(&nas100d_uart);
+
platform_add_devices(nas100d_devices, ARRAY_SIZE(nas100d_devices));
}
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index 55411f21d838..749a337494d3 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -7,6 +7,7 @@
* Copyright (C) 2003-2004 MontaVista Software, Inc.
*
* Author: Mark Rakes <mrakes at mac.com>
+ * Author: Rod Whitby <rod@whitby.id.au>
* Maintainers: http://www.nslu2-linux.org/
*
* Fixed missing init_time in MACHINE_START kas11 10/22/04
@@ -16,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
+#include <linux/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -43,6 +45,42 @@ static struct ixp4xx_i2c_pins nslu2_i2c_gpio_pins = {
.scl_pin = NSLU2_SCL_PIN,
};
+#ifdef CONFIG_LEDS_IXP4XX
+static struct resource nslu2_led_resources[] = {
+ {
+ .name = "ready", /* green led */
+ .start = NSLU2_LED_GRN,
+ .end = NSLU2_LED_GRN,
+ .flags = IXP4XX_GPIO_HIGH,
+ },
+ {
+ .name = "status", /* red led */
+ .start = NSLU2_LED_RED,
+ .end = NSLU2_LED_RED,
+ .flags = IXP4XX_GPIO_HIGH,
+ },
+ {
+ .name = "disk-1",
+ .start = NSLU2_LED_DISK1,
+ .end = NSLU2_LED_DISK1,
+ .flags = IXP4XX_GPIO_LOW,
+ },
+ {
+ .name = "disk-2",
+ .start = NSLU2_LED_DISK2,
+ .end = NSLU2_LED_DISK2,
+ .flags = IXP4XX_GPIO_LOW,
+ },
+};
+
+static struct platform_device nslu2_leds = {
+ .name = "IXP4XX-GPIO-LED",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(nslu2_led_resources),
+ .resource = nslu2_led_resources,
+};
+#endif
+
static struct platform_device nslu2_i2c_controller = {
.name = "IXP4XX-I2C",
.id = 0,
@@ -102,8 +140,10 @@ static struct platform_device nslu2_uart = {
static struct platform_device *nslu2_devices[] __initdata = {
&nslu2_i2c_controller,
&nslu2_flash,
- &nslu2_uart,
&nslu2_beeper,
+#ifdef CONFIG_LEDS_IXP4XX
+ &nslu2_leds,
+#endif
};
static void nslu2_power_off(void)
@@ -127,6 +167,12 @@ static void __init nslu2_init(void)
pm_power_off = nslu2_power_off;
+ /* This is only useful on a modified machine, but it is valuable
+ * to have it first in order to see debug messages, and so that
+ * it does *not* get removed if platform_add_devices fails!
+ */
+ (void)platform_device_register(&nslu2_uart);
+
platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices));
}
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 6178f046f128..73df32aac4c4 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -84,6 +84,15 @@ static struct omap_board_config_kernel ams_delta_config[] = {
{ OMAP_TAG_UART, &ams_delta_uart_config },
};
+static struct platform_device ams_delta_led_device = {
+ .name = "ams-delta-led",
+ .id = -1
+};
+
+static struct platform_device *ams_delta_devices[] __initdata = {
+ &ams_delta_led_device,
+};
+
static void __init ams_delta_init(void)
{
iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
@@ -94,6 +103,8 @@ static void __init ams_delta_init(void)
/* Clear latch2 (NAND, LCD, modem enable) */
ams_delta_latch2_write(~0, 0);
+
+ platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
}
static void __init ams_delta_map_io(void)
diff --git a/arch/arm/mach-pnx4008/clock.c b/arch/arm/mach-pnx4008/clock.c
index 285b22f631e9..f582ed2ec43c 100644
--- a/arch/arm/mach-pnx4008/clock.c
+++ b/arch/arm/mach-pnx4008/clock.c
@@ -767,6 +767,54 @@ static struct clk *onchip_clks[] = {
&uart6_ck,
};
+static int local_clk_enable(struct clk *clk)
+{
+ int ret = 0;
+
+ if (!(clk->flags & FIXED_RATE) && !clk->rate && clk->set_rate
+ && clk->user_rate)
+ ret = clk->set_rate(clk, clk->user_rate);
+ return ret;
+}
+
+static void local_clk_disable(struct clk *clk)
+{
+ if (!(clk->flags & FIXED_RATE) && clk->rate && clk->set_rate)
+ clk->set_rate(clk, 0);
+}
+
+static void local_clk_unuse(struct clk *clk)
+{
+ if (clk->usecount > 0 && !(--clk->usecount)) {
+ local_clk_disable(clk);
+ if (clk->parent)
+ local_clk_unuse(clk->parent);
+ }
+}
+
+static int local_clk_use(struct clk *clk)
+{
+ int ret = 0;
+ if (clk->usecount++ == 0) {
+ if (clk->parent)
+ ret = local_clk_use(clk->parent);
+
+ if (ret != 0) {
+ clk->usecount--;
+ goto out;
+ }
+
+ ret = local_clk_enable(clk);
+
+ if (ret != 0 && clk->parent) {
+ local_clk_unuse(clk->parent);
+ clk->usecount--;
+ }
+ }
+out:
+ return ret;
+}
+
static int local_set_rate(struct clk *clk, u32 rate)
{
int ret = -EINVAL;
@@ -847,28 +895,12 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_rate);
-static int local_clk_enable(struct clk *clk)
-{
- int ret = 0;
-
- if (!(clk->flags & FIXED_RATE) && !clk->rate && clk->set_rate
- && clk->user_rate)
- ret = clk->set_rate(clk, clk->user_rate);
- return ret;
-}
-
-static void local_clk_disable(struct clk *clk)
-{
- if (!(clk->flags & FIXED_RATE) && clk->rate && clk->set_rate)
- clk->set_rate(clk, 0);
-}
-
int clk_enable(struct clk *clk)
{
int ret = 0;
clock_lock();
- ret = local_clk_enable(clk);
+ ret = local_clk_use(clk);
clock_unlock();
return ret;
}
@@ -878,70 +910,11 @@ EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
clock_lock();
- local_clk_disable(clk);
- clock_unlock();
-}
-
-EXPORT_SYMBOL(clk_disable);
-
-static void local_clk_unuse(struct clk *clk)
-{
- if (clk->usecount > 0 && !(--clk->usecount)) {
- local_clk_disable(clk);
- if (clk->parent)
- local_clk_unuse(clk->parent);
- }
-}
-
-static int local_clk_use(struct clk *clk)
-{
- int ret = 0;
- if (clk->usecount++ == 0) {
- if (clk->parent)
- ret = local_clk_use(clk->parent);
-
- if (ret != 0) {
- clk->usecount--;
- goto out;
- }
-
- ret = local_clk_enable(clk);
-
- if (ret != 0 && clk->parent) {
- local_clk_unuse(clk->parent);
- clk->usecount--;
- }
- }
-out:
- return ret;
-}
-
-/* The main purpose of clk_use ans clk_unuse functions
- * is to control switching 13MHz oscillator and PLL1 (13'MHz),
- * so that they are disabled whenever none of PLL2-5 is using them.
- * Although in theory these functions should work with any clock,
- * please use them only on PLL2 - PLL5 to avoid confusion.
- */
-int clk_use(struct clk *clk)
-{
- int ret = 0;
-
- clock_lock();
- ret = local_clk_use(clk);
- clock_unlock();
- return ret;
-}
-EXPORT_SYMBOL(clk_use);
-
-void clk_unuse(struct clk *clk)
-{
-
- clock_lock();
local_clk_unuse(clk);
clock_unlock();
}
-EXPORT_SYMBOL(clk_unuse);
+EXPORT_SYMBOL(clk_disable);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
@@ -995,7 +968,7 @@ static int __init clk_init(void)
__FUNCTION__, (*clkp)->name, (*clkp)->rate);
}
- clk_use(&ck_pll4);
+ local_clk_use(&ck_pll4);
/* if ck_13MHz is not used, disable it. */
if (ck_13MHz.usecount == 0)
diff --git a/arch/arm/mach-pnx4008/serial.c b/arch/arm/mach-pnx4008/serial.c
index 10322384e45d..95a1b3f964a2 100644
--- a/arch/arm/mach-pnx4008/serial.c
+++ b/arch/arm/mach-pnx4008/serial.c
@@ -20,7 +20,7 @@
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
-#include <asm/arch/pm.h>
+#include <asm/arch/gpio.h>
#include <asm/arch/clock.h>
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index 0c334136db7c..7b786d725636 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -114,9 +114,15 @@ config MACH_NEXCODER_2440
endmenu
+config S3C2410_CLOCK
+ bool
+ help
+ Clock code for the S3C2410, and similar processors
+
config CPU_S3C2410
bool
depends on ARCH_S3C2410
+ select S3C2410_CLOCK
help
Support for S3C2410 and S3C2410A family from the S3C24XX line
of Samsung Mobile CPUs.
@@ -130,6 +136,7 @@ config CPU_S3C244X
config CPU_S3C2440
bool
depends on ARCH_S3C2410
+ select S3C2410_CLOCK
select CPU_S3C244X
help
Support for S3C2440 Samsung Mobile CPU based systems.
@@ -137,6 +144,7 @@ config CPU_S3C2440
config CPU_S3C2442
bool
depends on ARCH_S3C2420
+ select S3C2410_CLOCK
select CPU_S3C244X
help
Support for S3C2442 Samsung Mobile CPU based systems.
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 5e09355cd4f4..372dbcea1434 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o
+# Clock control
+
+obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
+
# S3C2440 support
obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c
index 99d174612b53..c5c93c333ac6 100644
--- a/arch/arm/mach-s3c2410/clock.c
+++ b/arch/arm/mach-s3c2410/clock.c
@@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
- * S3C2410 Clock control support
+ * S3C24XX Core clock control support
*
* Based on, and code from linux/arch/arm/mach-versatile/clock.c
**
@@ -56,25 +56,6 @@ static LIST_HEAD(clocks);
DEFINE_MUTEX(clocks_mutex);
-/* old functions */
-
-void inline s3c24xx_clk_enable(unsigned int clocks, unsigned int enable)
-{
- unsigned long clkcon;
-
- clkcon = __raw_readl(S3C2410_CLKCON);
-
- if (enable)
- clkcon |= clocks;
- else
- clkcon &= ~clocks;
-
- /* ensure none of the special function bits set */
- clkcon &= ~(S3C2410_CLKCON_IDLE|S3C2410_CLKCON_POWER | 3);
-
- __raw_writel(clkcon, S3C2410_CLKCON);
-}
-
/* enable and disable calls for use with the clk struct */
static int clk_null_enable(struct clk *clk, int enable)
@@ -82,12 +63,6 @@ static int clk_null_enable(struct clk *clk, int enable)
return 0;
}
-int s3c24xx_clkcon_enable(struct clk *clk, int enable)
-{
- s3c24xx_clk_enable(clk->ctrlbit, enable);
- return 0;
-}
-
/* Clock API calls */
struct clk *clk_get(struct device *dev, const char *id)
@@ -173,8 +148,11 @@ unsigned long clk_get_rate(struct clk *clk)
if (clk->rate != 0)
return clk->rate;
- while (clk->parent != NULL && clk->rate == 0)
- clk = clk->parent;
+ if (clk->get_rate != NULL)
+ return (clk->get_rate)(clk);
+
+ if (clk->parent != NULL)
+ return clk_get_rate(clk->parent);
return clk->rate;
}
@@ -233,28 +211,6 @@ EXPORT_SYMBOL(clk_set_rate);
EXPORT_SYMBOL(clk_get_parent);
EXPORT_SYMBOL(clk_set_parent);
-/* base clock enable */
-
-static int s3c24xx_upll_enable(struct clk *clk, int enable)
-{
- unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
- unsigned long orig = clkslow;
-
- if (enable)
- clkslow &= ~S3C2410_CLKSLOW_UCLK_OFF;
- else
- clkslow |= S3C2410_CLKSLOW_UCLK_OFF;
-
- __raw_writel(clkslow, S3C2410_CLKSLOW);
-
- /* if we started the UPLL, then allow to settle */
-
- if (enable && (orig & S3C2410_CLKSLOW_UCLK_OFF))
- udelay(200);
-
- return 0;
-}
-
/* base clocks */
static struct clk clk_xtal = {
@@ -265,15 +221,14 @@ static struct clk clk_xtal = {
.ctrlbit = 0,
};
-static struct clk clk_upll = {
+struct clk clk_upll = {
.name = "upll",
.id = -1,
.parent = NULL,
- .enable = s3c24xx_upll_enable,
.ctrlbit = 0,
};
-static struct clk clk_f = {
+struct clk clk_f = {
.name = "fclk",
.id = -1,
.rate = 0,
@@ -281,7 +236,7 @@ static struct clk clk_f = {
.ctrlbit = 0,
};
-static struct clk clk_h = {
+struct clk clk_h = {
.name = "hclk",
.id = -1,
.rate = 0,
@@ -289,7 +244,7 @@ static struct clk clk_h = {
.ctrlbit = 0,
};
-static struct clk clk_p = {
+struct clk clk_p = {
.name = "pclk",
.id = -1,
.rate = 0,
@@ -426,108 +381,6 @@ struct clk s3c24xx_uclk = {
.id = -1,
};
-
-/* standard clock definitions */
-
-static struct clk init_clocks[] = {
- {
- .name = "nand",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_NAND,
- }, {
- .name = "lcd",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_LCDC,
- }, {
- .name = "usb-host",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBH,
- }, {
- .name = "usb-device",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBD,
- }, {
- .name = "timers",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_PWMT,
- }, {
- .name = "sdi",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_SDI,
- }, {
- .name = "uart",
- .id = 0,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART0,
- }, {
- .name = "uart",
- .id = 1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART1,
- }, {
- .name = "uart",
- .id = 2,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART2,
- }, {
- .name = "gpio",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_GPIO,
- }, {
- .name = "rtc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_RTC,
- }, {
- .name = "adc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_ADC,
- }, {
- .name = "i2c",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_IIC,
- }, {
- .name = "iis",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_IIS,
- }, {
- .name = "spi",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_SPI,
- }, {
- .name = "watchdog",
- .id = -1,
- .parent = &clk_p,
- .ctrlbit = 0,
- }
-};
-
/* initialise the clock system */
int s3c24xx_register_clock(struct clk *clk)
@@ -537,14 +390,6 @@ int s3c24xx_register_clock(struct clk *clk)
if (clk->enable == NULL)
clk->enable = clk_null_enable;
- /* if this is a standard clock, set the usage state */
-
- if (clk->ctrlbit && clk->enable == s3c24xx_clkcon_enable) {
- unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
-
- clk->usage = (clkcon & clk->ctrlbit) ? 1 : 0;
- }
-
/* add to the list of available clocks */
mutex_lock(&clocks_mutex);
@@ -561,44 +406,17 @@ int __init s3c24xx_setup_clocks(unsigned long xtal,
unsigned long hclk,
unsigned long pclk)
{
- unsigned long upllcon = __raw_readl(S3C2410_UPLLCON);
- unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
- struct clk *clkp = init_clocks;
- int ptr;
- int ret;
-
- printk(KERN_INFO "S3C2410 Clocks, (c) 2004 Simtec Electronics\n");
+ printk(KERN_INFO "S3C24XX Clocks, (c) 2004 Simtec Electronics\n");
/* initialise the main system clocks */
clk_xtal.rate = xtal;
- clk_upll.rate = s3c2410_get_pll(upllcon, xtal);
+ clk_upll.rate = s3c2410_get_pll(__raw_readl(S3C2410_UPLLCON), xtal);
clk_h.rate = hclk;
clk_p.rate = pclk;
clk_f.rate = fclk;
- /* We must be careful disabling the clocks we are not intending to
- * be using at boot time, as subsytems such as the LCD which do
- * their own DMA requests to the bus can cause the system to lockup
- * if they where in the middle of requesting bus access.
- *
- * Disabling the LCD clock if the LCD is active is very dangerous,
- * and therefore the bootloader should be careful to not enable
- * the LCD clock if it is not needed.
- */
-
- mutex_lock(&clocks_mutex);
-
- s3c24xx_clk_enable(S3C2410_CLKCON_NAND, 0);
- s3c24xx_clk_enable(S3C2410_CLKCON_USBH, 0);
- s3c24xx_clk_enable(S3C2410_CLKCON_USBD, 0);
- s3c24xx_clk_enable(S3C2410_CLKCON_ADC, 0);
- s3c24xx_clk_enable(S3C2410_CLKCON_IIC, 0);
- s3c24xx_clk_enable(S3C2410_CLKCON_SPI, 0);
-
- mutex_unlock(&clocks_mutex);
-
/* assume uart clocks are correctly setup */
/* register our clocks */
@@ -618,27 +436,5 @@ int __init s3c24xx_setup_clocks(unsigned long xtal,
if (s3c24xx_register_clock(&clk_p) < 0)
printk(KERN_ERR "failed to register cpu pclk\n");
-
- if (s3c24xx_register_clock(&clk_usb_bus) < 0)
- printk(KERN_ERR "failed to register usb bus clock\n");
-
- /* register clocks from clock array */
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
- }
-
- /* show the clock-slow value */
-
- printk("CLOCK: Slow mode (%ld.%ld MHz), %s, MPLL %s, UPLL %s\n",
- print_mhz(xtal / ( 2 * S3C2410_CLKSLOW_GET_SLOWVAL(clkslow))),
- (clkslow & S3C2410_CLKSLOW_SLOW) ? "slow" : "fast",
- (clkslow & S3C2410_CLKSLOW_MPLL_OFF) ? "off" : "on",
- (clkslow & S3C2410_CLKSLOW_UCLK_OFF) ? "off" : "on");
-
return 0;
}
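
The reworked clk_get_rate() above now falls back to a get_rate callback or walks up the parent chain recursively instead of looping. From a driver's point of view the standard <linux/clk.h> consumer calls are unchanged; a rough usage sketch (the device, clock id and function name are placeholders) might be:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	unsigned long rate;

	clk = clk_get(&pdev->dev, "uart");	/* "uart" is a placeholder id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	rate = clk_get_rate(clk);	/* may come from a parent or get_rate() */

	/* ... program the peripheral's dividers from "rate" ... */

	clk_disable(clk);
	clk_put(clk);
	return 0;
}
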
diff --git a/arch/arm/mach-s3c2410/clock.h b/arch/arm/mach-s3c2410/clock.h
index 01bb458bf8eb..9456c81eb5d3 100644
--- a/arch/arm/mach-s3c2410/clock.h
+++ b/arch/arm/mach-s3c2410/clock.h
@@ -22,6 +22,7 @@ struct clk {
int (*enable)(struct clk *, int enable);
int (*set_rate)(struct clk *c, unsigned long rate);
+ unsigned long (*get_rate)(struct clk *c);
unsigned long (*round_rate)(struct clk *c, unsigned long rate);
int (*set_parent)(struct clk *c, struct clk *parent);
};
@@ -36,6 +37,13 @@ extern struct clk s3c24xx_uclk;
extern struct clk clk_usb_bus;
+/* core clock support */
+
+extern struct clk clk_f;
+extern struct clk clk_h;
+extern struct clk clk_p;
+extern struct clk clk_upll;
+
/* exports for arch/arm/mach-s3c2410
*
* Please DO NOT use these outside of arch/arm/mach-s3c2410
@@ -43,7 +51,8 @@ extern struct clk clk_usb_bus;
extern struct mutex clocks_mutex;
-extern int s3c24xx_clkcon_enable(struct clk *clk, int enable);
+extern int s3c2410_clkcon_enable(struct clk *clk, int enable);
+
extern int s3c24xx_register_clock(struct clk *clk);
extern int s3c24xx_setup_clocks(unsigned long xtal,
diff --git a/arch/arm/mach-s3c2410/cpu.h b/arch/arm/mach-s3c2410/cpu.h
index 40862899b2f1..21c62dc29bb2 100644
--- a/arch/arm/mach-s3c2410/cpu.h
+++ b/arch/arm/mach-s3c2410/cpu.h
@@ -73,5 +73,6 @@ extern struct sys_timer s3c24xx_timer;
/* system device classes */
+extern struct sysdev_class s3c2410_sysclass;
extern struct sysdev_class s3c2440_sysclass;
extern struct sysdev_class s3c2442_sysclass;
diff --git a/arch/arm/mach-s3c2410/s3c2410-clock.c b/arch/arm/mach-s3c2410/s3c2410-clock.c
new file mode 100644
index 000000000000..fd17c60e1132
--- /dev/null
+++ b/arch/arm/mach-s3c2410/s3c2410-clock.c
@@ -0,0 +1,263 @@
+/* linux/arch/arm/mach-s3c2410/clock.c
+ *
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410,S3C2440,S3C2442 Clock control support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/sysdev.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+
+#include <asm/arch/regs-clock.h>
+#include <asm/arch/regs-gpio.h>
+
+#include "clock.h"
+#include "cpu.h"
+
+int s3c2410_clkcon_enable(struct clk *clk, int enable)
+{
+ unsigned int clocks = clk->ctrlbit;
+ unsigned long clkcon;
+
+ clkcon = __raw_readl(S3C2410_CLKCON);
+
+ if (enable)
+ clkcon |= clocks;
+ else
+ clkcon &= ~clocks;
+
+ /* ensure none of the special function bits set */
+ clkcon &= ~(S3C2410_CLKCON_IDLE|S3C2410_CLKCON_POWER);
+
+ __raw_writel(clkcon, S3C2410_CLKCON);
+
+ return 0;
+}
+
+static int s3c2410_upll_enable(struct clk *clk, int enable)
+{
+ unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
+ unsigned long orig = clkslow;
+
+ if (enable)
+ clkslow &= ~S3C2410_CLKSLOW_UCLK_OFF;
+ else
+ clkslow |= S3C2410_CLKSLOW_UCLK_OFF;
+
+ __raw_writel(clkslow, S3C2410_CLKSLOW);
+
+ /* if we started the UPLL, then allow to settle */
+
+ if (enable && (orig & S3C2410_CLKSLOW_UCLK_OFF))
+ udelay(200);
+
+ return 0;
+}
+
+/* standard clock definitions */
+
+static struct clk init_clocks_disable[] = {
+ {
+ .name = "nand",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_NAND,
+ }, {
+ .name = "sdi",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_SDI,
+ }, {
+ .name = "adc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_ADC,
+ }, {
+ .name = "i2c",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_IIC,
+ }, {
+ .name = "iis",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_IIS,
+ }, {
+ .name = "spi",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_SPI,
+ }
+};
+
+static struct clk init_clocks[] = {
+ {
+ .name = "lcd",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_LCDC,
+ }, {
+ .name = "gpio",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_GPIO,
+ }, {
+ .name = "usb-host",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBH,
+ }, {
+ .name = "usb-device",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBD,
+ }, {
+ .name = "timers",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_PWMT,
+ }, {
+ .name = "uart",
+ .id = 0,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART0,
+ }, {
+ .name = "uart",
+ .id = 1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART1,
+ }, {
+ .name = "uart",
+ .id = 2,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART2,
+ }, {
+ .name = "rtc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_RTC,
+ }, {
+ .name = "watchdog",
+ .id = -1,
+ .parent = &clk_p,
+ .ctrlbit = 0,
+ }
+};
+
+/* s3c2410_baseclk_add()
+ *
+ * Add all the clocks used by the s3c2410 or compatible CPUs
+ * such as the S3C2440 and S3C2442.
+ *
+ * We cannot use a system device as we are needed before any
+ * of the init-calls that initialise the devices are actually
+ * done.
+*/
+
+int __init s3c2410_baseclk_add(void)
+{
+ unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
+ unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
+ struct clk *clkp;
+ struct clk *xtal;
+ int ret;
+ int ptr;
+
+ clk_upll.enable = s3c2410_upll_enable;
+
+ if (s3c24xx_register_clock(&clk_usb_bus) < 0)
+ printk(KERN_ERR "failed to register usb bus clock\n");
+
+ /* register clocks from clock array */
+
+ clkp = init_clocks;
+ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
+ /* ensure that we note the clock state */
+
+ clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
+
+ ret = s3c24xx_register_clock(clkp);
+ if (ret < 0) {
+ printk(KERN_ERR "Failed to register clock %s (%d)\n",
+ clkp->name, ret);
+ }
+ }
+
+ /* We must be careful disabling the clocks we are not intending to
+	 * be using at boot time, as subsystems such as the LCD which do
+	 * their own DMA requests to the bus can cause the system to lock up
+	 * if they were in the middle of requesting bus access.
+ *
+ * Disabling the LCD clock if the LCD is active is very dangerous,
+ * and therefore the bootloader should be careful to not enable
+ * the LCD clock if it is not needed.
+ */
+
+ /* install (and disable) the clocks we do not need immediately */
+
+ clkp = init_clocks_disable;
+ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
+
+ ret = s3c24xx_register_clock(clkp);
+ if (ret < 0) {
+ printk(KERN_ERR "Failed to register clock %s (%d)\n",
+ clkp->name, ret);
+ }
+
+ s3c2410_clkcon_enable(clkp, 0);
+ }
+
+ /* show the clock-slow value */
+
+ xtal = clk_get(NULL, "xtal");
+
+ printk("CLOCK: Slow mode (%ld.%ld MHz), %s, MPLL %s, UPLL %s\n",
+ print_mhz(clk_get_rate(xtal) /
+ ( 2 * S3C2410_CLKSLOW_GET_SLOWVAL(clkslow))),
+ (clkslow & S3C2410_CLKSLOW_SLOW) ? "slow" : "fast",
+ (clkslow & S3C2410_CLKSLOW_MPLL_OFF) ? "off" : "on",
+ (clkslow & S3C2410_CLKSLOW_UCLK_OFF) ? "off" : "on");
+
+ return 0;
+}
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index 0852e87a79c4..a110cff9cf6b 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
+#include <linux/sysdev.h>
#include <linux/platform_device.h>
#include <asm/mach/arch.h>
@@ -108,11 +109,33 @@ void __init s3c2410_init_clocks(int xtal)
*/
s3c24xx_setup_clocks(xtal, fclk, hclk, pclk);
+ s3c2410_baseclk_add();
}
+struct sysdev_class s3c2410_sysclass = {
+ set_kset_name("s3c2410-core"),
+};
+
+static struct sys_device s3c2410_sysdev = {
+ .cls = &s3c2410_sysclass,
+};
+
+/* need to register class before we actually register the device, and
+ * we also need to ensure that it has been initialised before any of the
+ * drivers even try to use it (even if not on an s3c2440 based system)
+ * as a driver which may support both 2410 and 2440 may try and use it.
+*/
+
+static int __init s3c2410_core_init(void)
+{
+ return sysdev_class_register(&s3c2410_sysclass);
+}
+
+core_initcall(s3c2410_core_init);
+
int __init s3c2410_init(void)
{
printk("S3C2410: Initialising architecture\n");
- return 0;
+ return sysdev_register(&s3c2410_sysdev);
}
diff --git a/arch/arm/mach-s3c2410/s3c2410.h b/arch/arm/mach-s3c2410/s3c2410.h
index 4d5312a48209..73f1a2474a61 100644
--- a/arch/arm/mach-s3c2410/s3c2410.h
+++ b/arch/arm/mach-s3c2410/s3c2410.h
@@ -29,6 +29,8 @@ extern void s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no);
extern void s3c2410_init_clocks(int xtal);
+extern int s3c2410_baseclk_add(void);
+
#else
#define s3c2410_init_clocks NULL
#define s3c2410_init_uarts NULL
diff --git a/arch/arm/mach-s3c2410/s3c2440-clock.c b/arch/arm/mach-s3c2410/s3c2440-clock.c
index d7a30ed6c327..15796864d010 100644
--- a/arch/arm/mach-s3c2410/s3c2440-clock.c
+++ b/arch/arm/mach-s3c2410/s3c2440-clock.c
@@ -91,7 +91,7 @@ static int s3c2440_camif_upll_setrate(struct clk *clk, unsigned long rate)
static struct clk s3c2440_clk_cam = {
.name = "camif",
.id = -1,
- .enable = s3c24xx_clkcon_enable,
+ .enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2440_CLKCON_CAMERA,
};
@@ -105,7 +105,7 @@ static struct clk s3c2440_clk_cam_upll = {
static struct clk s3c2440_clk_ac97 = {
.name = "ac97",
.id = -1,
- .enable = s3c24xx_clkcon_enable,
+ .enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2440_CLKCON_CAMERA,
};
diff --git a/arch/arm/mach-s3c2410/s3c2442-clock.c b/arch/arm/mach-s3c2410/s3c2442-clock.c
index 5b7b301eb522..d9f54b5cab7f 100644
--- a/arch/arm/mach-s3c2410/s3c2442-clock.c
+++ b/arch/arm/mach-s3c2410/s3c2442-clock.c
@@ -102,7 +102,7 @@ static int s3c2442_camif_upll_setrate(struct clk *clk, unsigned long rate)
static struct clk s3c2442_clk_cam = {
.name = "camif",
.id = -1,
- .enable = s3c24xx_clkcon_enable,
+ .enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2440_CLKCON_CAMERA,
};
diff --git a/arch/arm/mach-s3c2410/s3c244x.c b/arch/arm/mach-s3c2410/s3c244x.c
index 96852a7000db..838bc525e836 100644
--- a/arch/arm/mach-s3c2410/s3c244x.c
+++ b/arch/arm/mach-s3c2410/s3c244x.c
@@ -34,6 +34,7 @@
#include <asm/arch/regs-gpioj.h>
#include <asm/arch/regs-dsc.h>
+#include "s3c2410.h"
#include "s3c2440.h"
#include "s3c244x.h"
#include "clock.h"
@@ -118,6 +119,7 @@ void __init s3c244x_init_clocks(int xtal)
*/
s3c24xx_setup_clocks(xtal, fclk, hclk, pclk);
+ s3c2410_baseclk_add();
}
#ifdef CONFIG_PM
diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c
index 2dfe1ac42ee8..7d977d23f026 100644
--- a/arch/arm/nwfpe/fpmodule.c
+++ b/arch/arm/nwfpe/fpmodule.c
@@ -33,7 +33,8 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
-/* XXX */
+
+#include <asm/thread_notify.h>
#include "softfloat.h"
#include "fpopcode.h"
@@ -56,16 +57,28 @@ void fp_send_sig(unsigned long sig, struct task_struct *p, int priv);
extern char fpe_type[];
#endif
+static int nwfpe_notify(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ struct thread_info *thread = v;
+
+ if (cmd == THREAD_NOTIFY_FLUSH)
+ nwfpe_init_fpa(&thread->fpstate);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nwfpe_notifier_block = {
+ .notifier_call = nwfpe_notify,
+};
+
/* kernel function prototypes required */
void fp_setup(void);
/* external declarations for saved kernel symbols */
extern void (*kern_fp_enter)(void);
-extern void (*fp_init)(union fp_state *);
/* Original value of fp_enter from kernel before patched by fpe_init. */
static void (*orig_fp_enter)(void);
-static void (*orig_fp_init)(union fp_state *);
/* forward declarations */
extern void nwfpe_enter(void);
@@ -88,20 +101,20 @@ static int __init fpe_init(void)
printk(KERN_WARNING "NetWinder Floating Point Emulator V0.97 ("
NWFPE_BITS " precision)\n");
+ thread_register_notifier(&nwfpe_notifier_block);
+
/* Save pointer to the old FP handler and then patch ourselves in */
orig_fp_enter = kern_fp_enter;
- orig_fp_init = fp_init;
kern_fp_enter = nwfpe_enter;
- fp_init = nwfpe_init_fpa;
return 0;
}
static void __exit fpe_exit(void)
{
+ thread_unregister_notifier(&nwfpe_notifier_block);
/* Restore the values we saved earlier. */
kern_fp_enter = orig_fp_enter;
- fp_init = orig_fp_init;
}
/*
diff --git a/arch/arm/plat-omap/timer32k.c b/arch/arm/plat-omap/timer32k.c
index b2a943bf11ef..3461a6c9665c 100644
--- a/arch/arm/plat-omap/timer32k.c
+++ b/arch/arm/plat-omap/timer32k.c
@@ -210,7 +210,8 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id,
now = omap_32k_sync_timer_read();
- while (now - omap_32k_last_tick >= OMAP_32K_TICKS_PER_HZ) {
+ while ((signed long)(now - omap_32k_last_tick)
+ >= OMAP_32K_TICKS_PER_HZ) {
omap_32k_last_tick += OMAP_32K_TICKS_PER_HZ;
timer_tick(regs);
}
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index afabac31dd1d..7e136e77971a 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -7,6 +7,9 @@
# EXTRA_CFLAGS := -DDEBUG
# EXTRA_AFLAGS := -DDEBUG
+AFLAGS :=$(AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp)
+LDFLAGS +=--no-warn-mismatch
+
obj-y += vfp.o
-vfp-$(CONFIG_VFP) += entry.o vfpmodule.o vfphw.o vfpsingle.o vfpdouble.o
+vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index a3f65b47aea9..eb683cd77163 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -192,7 +192,7 @@ vfp_get_double:
add pc, pc, r0, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mrrc p11, 1, r0, r1, c\dr @ fmrrd r0, r1, d\dr
+ fmrrd r0, r1, d\dr
mov pc, lr
.endr
@@ -206,6 +206,6 @@ vfp_put_double:
add pc, pc, r0, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mcrr p11, 1, r1, r2, c\dr @ fmdrr r1, r2, d\dr
+ fmdrr d\dr, r1, r2
mov pc, lr
.endr
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 03486be04193..2476f4c2e760 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -15,6 +15,8 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
+
+#include <asm/thread_notify.h>
#include <asm/vfp.h>
#include "vfpinstr.h"
@@ -36,38 +38,55 @@ union vfp_state *last_VFP_context;
*/
unsigned int VFP_arch;
-/*
- * Per-thread VFP initialisation.
- */
-void vfp_flush_thread(union vfp_state *vfp)
+static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
- memset(vfp, 0, sizeof(union vfp_state));
+ struct thread_info *thread = v;
+ union vfp_state *vfp = &thread->vfpstate;
- vfp->hard.fpexc = FPEXC_ENABLE;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+ /*
+ * Per-thread VFP initialisation.
+ */
+ memset(vfp, 0, sizeof(union vfp_state));
- /*
- * Disable VFP to ensure we initialise it first.
- */
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+ vfp->hard.fpexc = FPEXC_ENABLE;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
- /*
- * Ensure we don't try to overwrite our newly initialised
- * state information on the first fault.
- */
- if (last_VFP_context == vfp)
- last_VFP_context = NULL;
-}
+ /*
+ * Disable VFP to ensure we initialise it first.
+ */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
-/*
- * Per-thread VFP cleanup.
- */
-void vfp_release_thread(union vfp_state *vfp)
-{
- if (last_VFP_context == vfp)
- last_VFP_context = NULL;
+ /*
+ * FALLTHROUGH: Ensure we don't try to overwrite our newly
+ * initialised state information on the first fault.
+ */
+
+ case THREAD_NOTIFY_RELEASE:
+ /*
+ * Per-thread VFP cleanup.
+ */
+ if (last_VFP_context == vfp)
+ last_VFP_context = NULL;
+ break;
+
+ case THREAD_NOTIFY_SWITCH:
+ /*
+ * Always disable VFP so we can lazily save/restore the
+ * old state.
+ */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+ break;
+ }
+
+ return NOTIFY_DONE;
}
+static struct notifier_block vfp_notifier_block = {
+ .notifier_call = vfp_notifier,
+};
+
/*
* Raise a SIGFPE for the current process.
* sicode describes the signal being raised.
@@ -281,6 +300,8 @@ static int __init vfp_init(void)
(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
vfp_vector = vfp_support_entry;
+
+ thread_register_notifier(&vfp_notifier_block);
}
return 0;
}
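
The vfpmodule.c hunk folds vfp_flush_thread() and vfp_release_thread() into one thread-notifier callback and adds a SWITCH case for lazy save/restore. A userspace model of that dispatch is sketched below; the THREAD_NOTIFY_* values and the notifier plumbing are assumptions here, not taken from the kernel headers.

    /* userspace model of the thread-notifier dispatch added above */
    #include <stdio.h>
    #include <string.h>

    enum { THREAD_NOTIFY_FLUSH, THREAD_NOTIFY_RELEASE, THREAD_NOTIFY_SWITCH };

    struct vfp_state { unsigned int fpexc, fpscr; };

    static struct vfp_state *last_ctx;      /* models last_VFP_context */

    static int vfp_notifier(unsigned long cmd, void *v)
    {
            struct vfp_state *vfp = v;

            switch (cmd) {
            case THREAD_NOTIFY_FLUSH:
                    /* per-thread init: wipe the state, set default control bits */
                    memset(vfp, 0, sizeof(*vfp));
                    /* fall through: a freshly flushed thread must also drop any
                     * stale claim to the hardware registers, exactly like release */
            case THREAD_NOTIFY_RELEASE:
                    if (last_ctx == vfp)
                            last_ctx = NULL;
                    break;
            case THREAD_NOTIFY_SWITCH:
                    /* the real callback disables VFP here so the next VFP
                     * instruction traps and state is saved/restored lazily */
                    break;
            }
            return 0;                       /* stands in for NOTIFY_DONE */
    }

    int main(void)
    {
            struct vfp_state t = { 0, 0 };

            last_ctx = &t;
            vfp_notifier(THREAD_NOTIFY_FLUSH, &t);
            printf("last_ctx cleared: %s\n", last_ctx ? "no" : "yes");
            return 0;
    }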
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index a9b59527a741..81d94e41a189 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1474,7 +1474,7 @@ sys_call_table:
.long sys_mknodat
.long sys_fchownat
.long sys_futimesat
- .long sys_newfstatat /* 300 */
+ .long sys_fstatat64 /* 300 */
.long sys_unlinkat
.long sys_renameat
.long sys_linkat
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 0f273a7aca5a..dee637fffda5 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -26,16 +26,6 @@ extern long __memset_user(void *dst, const void *src, size_t count);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-
EXPORT_SYMBOL(ip_fast_csum);
#if 0
@@ -44,8 +34,6 @@ EXPORT_SYMBOL(local_bh_count);
#endif
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(__res_bus_clock_speed_HZ);
EXPORT_SYMBOL(__page_offset);
EXPORT_SYMBOL(__memcpy_user);
@@ -62,18 +50,12 @@ EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(__debug_bug_trap);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
-
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns);
diff --git a/arch/frv/kernel/irq-routing.c b/arch/frv/kernel/irq-routing.c
index d4776d1f4e82..b90b70a761d1 100644
--- a/arch/frv/kernel/irq-routing.c
+++ b/arch/frv/kernel/irq-routing.c
@@ -112,7 +112,7 @@ struct irq_source frv_cpuuart[2] = {
#define __CPUUART(X, A) \
[X] = { \
.muxname = "uart", \
- .muxdata = (volatile void __iomem *) A, \
+ .muxdata = (volatile void __iomem *)(unsigned long)A,\
.irqmask = 1 << IRQ_CPU_UART##X, \
.doirq = frv_cpuuart_doirq, \
}
@@ -136,7 +136,7 @@ struct irq_source frv_cpudma[8] = {
#define __CPUDMA(X, A) \
[X] = { \
.muxname = "dma", \
- .muxdata = (volatile void __iomem *) A, \
+ .muxdata = (volatile void __iomem *)(unsigned long)A,\
.irqmask = 1 << IRQ_CPU_DMA##X, \
.doirq = frv_cpudma_doirq, \
}
@@ -164,7 +164,7 @@ struct irq_source frv_cputimer[3] = {
#define __CPUTIMER(X) \
[X] = { \
.muxname = "timer", \
- .muxdata = 0, \
+ .muxdata = NULL, \
.irqmask = 1 << IRQ_CPU_TIMER##X, \
.doirq = frv_cputimer_doirq, \
}
@@ -187,7 +187,7 @@ struct irq_source frv_cpuexternal[8] = {
#define __CPUEXTERNAL(X) \
[X] = { \
.muxname = "ext", \
- .muxdata = 0, \
+ .muxdata = NULL, \
.irqmask = 1 << IRQ_CPU_EXTERNAL##X, \
.doirq = frv_cpuexternal_doirq, \
}
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 11fa326a8f62..8b112b361914 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -625,7 +625,7 @@ static struct proc_dir_entry * irq_dir [NR_IRQS];
#define HEX_DIGITS 8
-static unsigned int parse_hex_value (const char *buffer,
+static unsigned int parse_hex_value (const char __user *buffer,
unsigned long count, unsigned long *ret)
{
unsigned char hexnum [HEX_DIGITS];
@@ -672,7 +672,7 @@ static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
return sprintf (page, "%08lx\n", *mask);
}
-static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
+static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
unsigned long *mask = (unsigned long *) data, full_count = count, err;
@@ -711,7 +711,7 @@ void init_irq_proc (void)
int i;
/* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", 0);
+ root_irq_dir = proc_mkdir("irq", NULL);
/* create /proc/irq/prof_cpu_mask */
entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c
index f0b8fff3e733..43ce28a13a5d 100644
--- a/arch/frv/kernel/pm.c
+++ b/arch/frv/kernel/pm.c
@@ -137,7 +137,7 @@ unsigned long sleep_phys_sp(void *sp)
#define CTL_PM_P0 4
#define CTL_PM_CM 5
-static int user_atoi(char *ubuf, size_t len)
+static int user_atoi(char __user *ubuf, size_t len)
{
char buf[16];
unsigned long ret;
@@ -159,7 +159,7 @@ static int user_atoi(char *ubuf, size_t len)
* Send us to sleep.
*/
static int sysctl_pm_do_suspend(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int retval, mode;
@@ -215,7 +215,7 @@ static int try_set_cmode(int new_cmode)
static int cmode_procctl(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_cmode;
@@ -227,9 +227,9 @@ static int cmode_procctl(ctl_table *ctl, int write, struct file *filp,
return try_set_cmode(new_cmode)?:*lenp;
}
-static int cmode_sysctl(ctl_table *table, int *name, int nlen,
- void *oldval, size_t *oldlenp,
- void *newval, size_t newlen, void **context)
+static int cmode_sysctl(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen, void **context)
{
if (oldval && oldlenp) {
size_t oldlen;
@@ -240,7 +240,7 @@ static int cmode_sysctl(ctl_table *table, int *name, int nlen,
if (oldlen != sizeof(int))
return -EINVAL;
- if (put_user(clock_cmode_current, (unsigned int *)oldval) ||
+ if (put_user(clock_cmode_current, (unsigned __user *)oldval) ||
put_user(sizeof(int), oldlenp))
return -EFAULT;
}
@@ -250,7 +250,7 @@ static int cmode_sysctl(ctl_table *table, int *name, int nlen,
if (newlen != sizeof(int))
return -EINVAL;
- if (get_user(new_cmode, (int *)newval))
+ if (get_user(new_cmode, (int __user *)newval))
return -EFAULT;
return try_set_cmode(new_cmode)?:1;
@@ -318,7 +318,7 @@ static int try_set_cm(int new_cm)
}
static int p0_procctl(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_p0;
@@ -330,9 +330,9 @@ static int p0_procctl(ctl_table *ctl, int write, struct file *filp,
return try_set_p0(new_p0)?:*lenp;
}
-static int p0_sysctl(ctl_table *table, int *name, int nlen,
- void *oldval, size_t *oldlenp,
- void *newval, size_t newlen, void **context)
+static int p0_sysctl(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen, void **context)
{
if (oldval && oldlenp) {
size_t oldlen;
@@ -343,7 +343,7 @@ static int p0_sysctl(ctl_table *table, int *name, int nlen,
if (oldlen != sizeof(int))
return -EINVAL;
- if (put_user(clock_p0_current, (unsigned int *)oldval) ||
+ if (put_user(clock_p0_current, (unsigned __user *)oldval) ||
put_user(sizeof(int), oldlenp))
return -EFAULT;
}
@@ -353,7 +353,7 @@ static int p0_sysctl(ctl_table *table, int *name, int nlen,
if (newlen != sizeof(int))
return -EINVAL;
- if (get_user(new_p0, (int *)newval))
+ if (get_user(new_p0, (int __user *)newval))
return -EFAULT;
return try_set_p0(new_p0)?:1;
@@ -362,7 +362,7 @@ static int p0_sysctl(ctl_table *table, int *name, int nlen,
}
static int cm_procctl(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_cm;
@@ -374,9 +374,9 @@ static int cm_procctl(ctl_table *ctl, int write, struct file *filp,
return try_set_cm(new_cm)?:*lenp;
}
-static int cm_sysctl(ctl_table *table, int *name, int nlen,
- void *oldval, size_t *oldlenp,
- void *newval, size_t newlen, void **context)
+static int cm_sysctl(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen, void **context)
{
if (oldval && oldlenp) {
size_t oldlen;
@@ -387,7 +387,7 @@ static int cm_sysctl(ctl_table *table, int *name, int nlen,
if (oldlen != sizeof(int))
return -EINVAL;
- if (put_user(clock_cm_current, (unsigned int *)oldval) ||
+ if (put_user(clock_cm_current, (unsigned __user *)oldval) ||
put_user(sizeof(int), oldlenp))
return -EFAULT;
}
@@ -397,7 +397,7 @@ static int cm_sysctl(ctl_table *table, int *name, int nlen,
if (newlen != sizeof(int))
return -EINVAL;
- if (get_user(new_cm, (int *)newval))
+ if (get_user(new_cm, (int __user *)newval))
return -EFAULT;
return try_set_cm(new_cm)?:1;
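
Most of the frv changes here (pm.c above, and uaccess.c, sys_frv.c, sysctl.c and signal.c further down) only add the __user annotation that sparse uses to keep user-space pointers out of direct dereferences; __iomem in kmap.c works the same way with a different address-space number. A minimal standalone model, assuming the usual kernel definition of the macro:

    /* standalone model of the sparse __user annotation (not kernel code) */
    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user                 /* plain gcc: the annotation vanishes */
    #endif

    /* under sparse, dereferencing uptr directly or mixing it with kptr is
     * reported; plain compilers see ordinary pointers and build this unchanged */
    static int read_flag(const int *kptr, const int __user *uptr)
    {
            (void)uptr;             /* real code would go through copy_from_user() */
            return *kptr;
    }

    int main(void)
    {
            int flag = 1;

            return read_flag(&flag, 0) - 1;
    }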
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 0fff8a61ef2a..489e6c489cbe 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -246,7 +246,7 @@ int copy_thread(int nr, unsigned long clone_flags,
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(char *name, char **argv, char **envp)
+asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
int error;
char * filename;
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index 5908deae9607..1f7d65f29e78 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -814,7 +814,7 @@ void __init setup_arch(char **cmdline_p)
* - by now the stack is part of the init task */
printk("Memory %08lx-%08lx\n", memory_start, memory_end);
- if (memory_start == memory_end) BUG();
+ BUG_ON(memory_start == memory_end);
init_mm.start_code = (unsigned long) &_stext;
init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 679c1d5cc958..b8a5882b8625 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -98,7 +98,7 @@ int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
struct sigframe
{
- void (*pretcode)(void);
+ __sigrestore_t pretcode;
int sig;
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
@@ -107,10 +107,10 @@ struct sigframe
struct rt_sigframe
{
- void (*pretcode)(void);
+ __sigrestore_t pretcode;
int sig;
- struct siginfo *pinfo;
- void *puc;
+ struct siginfo __user *pinfo;
+ void __user *puc;
struct siginfo info;
struct ucontext uc;
uint32_t retcode[2];
@@ -233,7 +233,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (! on_sig_stack(sp))
+ if (! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
}
@@ -284,7 +284,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
* setlos #__NR_sigreturn,gr7
* tira gr0,0
*/
- if (__put_user((void (*)(void))frame->retcode, &frame->pretcode) ||
+ if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) ||
__put_user(0x8efc0000|__NR_sigreturn, &frame->retcode[0]) ||
__put_user(0xc0700000, &frame->retcode[1]))
goto give_sigsegv;
@@ -300,7 +300,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
if (get_personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
- (struct fdpic_func_descriptor *) ka->sa.sa_handler;
+ (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
__get_user(__frame->pc, &funcptr->text);
__get_user(__frame->gr15, &funcptr->GOT);
} else {
@@ -359,8 +359,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
if (__put_user(0, &frame->uc.uc_flags) ||
- __put_user(0, &frame->uc.uc_link) ||
- __put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) ||
+ __put_user(NULL, &frame->uc.uc_link) ||
+ __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) ||
__put_user(sas_ss_flags(__frame->sp), &frame->uc.uc_stack.ss_flags) ||
__put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size))
goto give_sigsegv;
@@ -382,7 +382,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* setlos #__NR_sigreturn,gr7
* tira gr0,0
*/
- if (__put_user((void (*)(void))frame->retcode, &frame->pretcode) ||
+ if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) ||
__put_user(0x8efc0000|__NR_rt_sigreturn, &frame->retcode[0]) ||
__put_user(0xc0700000, &frame->retcode[1]))
goto give_sigsegv;
@@ -398,7 +398,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
__frame->gr9 = (unsigned long) &frame->info;
if (get_personality & FDPIC_FUNCPTRS) {
- struct fdpic_func_descriptor *funcptr =
+ struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
__get_user(__frame->pc, &funcptr->text);
__get_user(__frame->gr15, &funcptr->GOT);
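
The on_sig_stack() to sas_ss_flags() change above (repeated for h8300 below) stops the signal code from "switching" to an alternate stack that was never configured: sas_ss_flags() reports SS_DISABLE when sas_ss_size is zero, so the frame stays on the current stack. A standalone model with assumed semantics and made-up addresses:

    /* standalone model of the altstack check (assumed semantics) */
    #include <stdio.h>

    #define SS_ONSTACK 1
    #define SS_DISABLE 2

    struct task { unsigned long sas_ss_sp, sas_ss_size; };

    static int on_sig_stack(const struct task *t, unsigned long sp)
    {
            return sp >= t->sas_ss_sp && sp < t->sas_ss_sp + t->sas_ss_size;
    }

    static int sas_ss_flags(const struct task *t, unsigned long sp)
    {
            if (!t->sas_ss_size)
                    return SS_DISABLE;      /* no alternate stack configured */
            return on_sig_stack(t, sp) ? SS_ONSTACK : 0;
    }

    int main(void)
    {
            struct task none = { 0, 0 };            /* sigaltstack() never called */
            struct task alt  = { 0x1000, 0x2000 };  /* real alternate stack */

            /* old check would switch to a zero-sized stack; new check does not */
            printf("no altstack: old=%d new=%d\n",
                   !on_sig_stack(&none, 0x9000), !sas_ss_flags(&none, 0x9000));
            /* both agree when an alternate stack exists and sp is off it */
            printf("altstack set: old=%d new=%d\n",
                   !on_sig_stack(&alt, 0x9000), !sas_ss_flags(&alt, 0x9000));
            return 0;
    }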
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
index 931aa6d895e3..c4d4348c9e8e 100644
--- a/arch/frv/kernel/sys_frv.c
+++ b/arch/frv/kernel/sys_frv.c
@@ -32,7 +32,7 @@
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
-asmlinkage long sys_pipe(unsigned long * fildes)
+asmlinkage long sys_pipe(unsigned long __user * fildes)
{
int fd[2];
int error;
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c
index 408b0f382b42..b908863d6593 100644
--- a/arch/frv/kernel/sysctl.c
+++ b/arch/frv/kernel/sysctl.c
@@ -49,7 +49,7 @@ static void frv_change_dcache_mode(unsigned long newmode)
* handle requests to dynamically switch the write caching mode delivered by /proc
*/
static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
unsigned long hsr0;
char buff[8];
@@ -123,7 +123,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
*/
#ifdef CONFIG_MMU
static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
pid_t pid;
char buff[16], *p;
diff --git a/arch/frv/kernel/uaccess.c b/arch/frv/kernel/uaccess.c
index 9b751c0f0e84..9fb771a20df3 100644
--- a/arch/frv/kernel/uaccess.c
+++ b/arch/frv/kernel/uaccess.c
@@ -17,7 +17,7 @@
/*
* copy a null terminated string from userspace
*/
-long strncpy_from_user(char *dst, const char *src, long count)
+long strncpy_from_user(char *dst, const char __user *src, long count)
{
unsigned long max;
char *p, ch;
@@ -70,9 +70,9 @@ EXPORT_SYMBOL(strncpy_from_user);
*
* Return 0 on exception, a value greater than N if too long
*/
-long strnlen_user(const char *src, long count)
+long strnlen_user(const char __user *src, long count)
{
- const char *p;
+ const char __user *p;
long err = 0;
char ch;
diff --git a/arch/frv/mb93090-mb00/pci-irq.c b/arch/frv/mb93090-mb00/pci-irq.c
index c4a1144c98b0..45ae39d84b69 100644
--- a/arch/frv/mb93090-mb00/pci-irq.c
+++ b/arch/frv/mb93090-mb00/pci-irq.c
@@ -32,11 +32,11 @@
*/
static const uint8_t __initdata pci_bus0_irq_routing[32][4] = {
- [0 ] { IRQ_FPGA_MB86943_PCI_INTA },
- [16] { IRQ_FPGA_RTL8029_INTA },
- [17] { IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB },
- [18] { IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA },
- [19] { IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD },
+ [0 ] = { IRQ_FPGA_MB86943_PCI_INTA },
+ [16] = { IRQ_FPGA_RTL8029_INTA },
+ [17] = { IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB },
+ [18] = { IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA },
+ [19] = { IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD },
};
void __init pcibios_irq_init(void)
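
The pci-irq.c hunk only adds the '=' that C99 designated initializers require; the older GNU '[idx] { ... }' spelling is deprecated. A hedged standalone sketch of the same idiom, with made-up IRQ numbers:

    /* standalone sketch of the C99 designated-initializer form */
    #include <stdio.h>
    #include <stdint.h>

    static const uint8_t irq_routing[32][4] = {
            [0]  = { 10 },                  /* only the named slots are filled... */
            [16] = { 5 },
            [17] = { 7, 9, 5, 11 },         /* ...every other entry stays zero */
    };

    int main(void)
    {
            printf("slot 17, pin B -> irq %d\n", irq_routing[17][1]);  /* 9 */
            printf("slot  3, pin A -> irq %d\n", irq_routing[3][0]);   /* 0, unset */
            return 0;
    }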
diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c
index c54f18e65ea6..40b62c5c2951 100644
--- a/arch/frv/mm/kmap.c
+++ b/arch/frv/mm/kmap.c
@@ -31,15 +31,15 @@
* Map some physical address range into the kernel address space.
*/
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
- return (void *)physaddr;
+ return (void __iomem *)physaddr;
}
/*
* Unmap a ioremap()ed region again
*/
-void iounmap(void *addr)
+void iounmap(void volatile __iomem *addr)
{
}
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index f13d5e82d4b9..7787f70a05bb 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -307,7 +307,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(usp))
+ if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
return (void *)((usp - frame_size) & -8UL);
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8dfa3054f10f..1596101cfaf8 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -173,6 +173,12 @@ config ACPI_SRAT
bool
default y
depends on NUMA && (X86_SUMMIT || X86_GENERICARCH)
+ select ACPI_NUMA
+
+config HAVE_ARCH_PARSE_SRAT
+ bool
+ default y
+ depends on ACPI_SRAT
config X86_SUMMIT_NUMA
bool
@@ -224,7 +230,6 @@ config NR_CPUS
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SMP
- default off
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with Intel Pentium 4 chips with HyperThreading at a
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index fbe93084244c..97ca17189af5 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -217,7 +217,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_madt *madt = NULL;
- if (!phys_addr || !size)
+ if (!phys_addr || !size || !cpu_has_apic)
return -EINVAL;
madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -623,9 +623,9 @@ extern u32 pmtmr_ioport;
static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
{
- struct fadt_descriptor_rev2 *fadt = NULL;
+ struct fadt_descriptor *fadt = NULL;
- fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
+ fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;
@@ -756,7 +756,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
return -ENODEV;
}
- if (!cpu_has_apic)
+ if (!cpu_has_apic)
return -ENODEV;
/*
diff --git a/arch/i386/kernel/acpi/processor.c b/arch/i386/kernel/acpi/processor.c
index 9f4cc02717ec..b54fded49834 100644
--- a/arch/i386/kernel/acpi/processor.c
+++ b/arch/i386/kernel/acpi/processor.c
@@ -47,7 +47,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
- buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
diff --git a/arch/i386/kernel/acpi/sleep.c b/arch/i386/kernel/acpi/sleep.c
index 1cb2b186a3af..4ee83577bf61 100644
--- a/arch/i386/kernel/acpi/sleep.c
+++ b/arch/i386/kernel/acpi/sleep.c
@@ -8,30 +8,17 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/dmi.h>
+#include <linux/cpumask.h>
+
#include <asm/smp.h>
-#include <asm/tlbflush.h>
/* address in low memory of the wakeup routine. */
unsigned long acpi_wakeup_address = 0;
unsigned long acpi_video_flags;
extern char wakeup_start, wakeup_end;
-extern void zap_low_mappings(void);
-
extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
-static void init_low_mapping(pgd_t * pgd, int pgd_limit)
-{
- int pgd_ofs = 0;
-
- while ((pgd_ofs < pgd_limit)
- && (pgd_ofs + USER_PTRS_PER_PGD < PTRS_PER_PGD)) {
- set_pgd(pgd, *(pgd + USER_PTRS_PER_PGD));
- pgd_ofs++, pgd++;
- }
- flush_tlb_all();
-}
-
/**
* acpi_save_state_mem - save kernel state
*
@@ -42,7 +29,6 @@ int acpi_save_state_mem(void)
{
if (!acpi_wakeup_address)
return 1;
- init_low_mapping(swapper_pg_dir, USER_PTRS_PER_PGD);
memcpy((void *)acpi_wakeup_address, &wakeup_start,
&wakeup_end - &wakeup_start);
acpi_copy_wakeup_routine(acpi_wakeup_address);
@@ -55,7 +41,6 @@ int acpi_save_state_mem(void)
*/
void acpi_restore_state_mem(void)
{
- zap_low_mappings();
}
/**
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S
index 7c74fe0dc93c..9f408eee4e6f 100644
--- a/arch/i386/kernel/acpi/wakeup.S
+++ b/arch/i386/kernel/acpi/wakeup.S
@@ -56,7 +56,7 @@ wakeup_code:
1:
# set up page table
- movl $swapper_pg_dir-__PAGE_OFFSET, %eax
+ movl $swsusp_pg_dir-__PAGE_OFFSET, %eax
movl %eax, %cr3
testl $1, real_efer_save_restore - wakeup_code
@@ -265,11 +265,6 @@ ENTRY(acpi_copy_wakeup_routine)
movl $0x12345678, saved_magic
ret
-.data
-ALIGN
-ENTRY(saved_magic) .long 0
-ENTRY(saved_eip) .long 0
-
save_registers:
leal 4(%esp), %eax
movl %eax, saved_context_esp
@@ -304,7 +299,11 @@ ret_point:
call restore_processor_state
ret
+.data
ALIGN
+ENTRY(saved_magic) .long 0
+ENTRY(saved_eip) .long 0
+
# saved registers
saved_gdt: .long 0,0
saved_idt: .long 0,0
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 3d4b2f3d116a..5ab59c12335b 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -62,7 +62,7 @@ int apic_verbosity;
static void apic_pm_activate(void);
-int modern_apic(void)
+static int modern_apic(void)
{
unsigned int lvr, version;
/* AMD systems use old APIC versions, so check the CPU */
@@ -113,7 +113,7 @@ void __init apic_intr_init(void)
}
/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer = 0;
+int using_apic_timer __read_mostly = 0;
static int enabled_via_apicbase;
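
Several hunks in this patch (using_apic_timer above, the apm.c and i387.c globals below) only tag rarely-written globals with __read_mostly so they are grouped away from hot, frequently-written data and their cache lines stop bouncing between CPUs. A standalone sketch, assuming the kernel's section-attribute definition of the macro:

    /* standalone sketch of __read_mostly (assumed kernel definition) */
    #include <stdio.h>

    /* in 2.6-era kernels this expands to a .data.read_mostly section placement;
     * grouping rarely-written globals keeps them off cache lines that hot,
     * frequently-written data would otherwise keep invalidating */
    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

    static int idle_threshold __read_mostly = 95;   /* set once at module load */
    static int counter;                             /* written on every event */

    int main(void)
    {
            counter += idle_threshold;
            printf("%d\n", counter);
            return 0;
    }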
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index df0e1745f189..9e819eb68229 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -374,14 +374,14 @@ static struct {
unsigned short segment;
} apm_bios_entry;
static int clock_slowed;
-static int idle_threshold = DEFAULT_IDLE_THRESHOLD;
-static int idle_period = DEFAULT_IDLE_PERIOD;
+static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
+static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
static int set_pm_idle;
static int suspends_pending;
static int standbys_pending;
static int ignore_sys_suspend;
static int ignore_normal_resume;
-static int bounce_interval = DEFAULT_BOUNCE_INTERVAL;
+static int bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL;
#ifdef CONFIG_APM_RTC_IS_GMT
# define clock_cmos_diff 0
@@ -390,8 +390,8 @@ static int bounce_interval = DEFAULT_BOUNCE_INTERVAL;
static long clock_cmos_diff;
static int got_clock_diff;
#endif
-static int debug;
-static int smp;
+static int debug __read_mostly;
+static int smp __read_mostly;
static int apm_disabled = -1;
#ifdef CONFIG_SMP
static int power_off;
@@ -403,8 +403,8 @@ static int realmode_power_off = 1;
#else
static int realmode_power_off;
#endif
-static int exit_kapmd;
-static int kapmd_running;
+static int exit_kapmd __read_mostly;
+static int kapmd_running __read_mostly;
#ifdef CONFIG_APM_ALLOW_INTS
static int allow_ints = 1;
#else
@@ -416,15 +416,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
static struct apm_user * user_list;
static DEFINE_SPINLOCK(user_list_lock);
-static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
+static const struct desc_struct bad_bios_desc = { 0, 0x00409200 };
-static char driver_version[] = "1.16ac"; /* no spaces */
+static const char driver_version[] = "1.16ac"; /* no spaces */
/*
* APM event names taken from the APM 1.2 specification. These are
* the message codes that the BIOS uses to tell us about events
*/
-static char * apm_event_name[] = {
+static const char * const apm_event_name[] = {
"system standby",
"system suspend",
"normal resume",
@@ -616,7 +616,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
* @ecx_in: ECX register value for BIOS call
* @eax: EAX register on return from the BIOS call
*
- * Make a BIOS call that does only returns one value, or just status.
+ * Make a BIOS call that returns one value only, or just status.
* If there is an error, then the error code is returned in AH
* (bits 8-15 of eax) and this function returns non-zero. This is
* used for simpler BIOS operations. This call may hold interrupts
@@ -822,7 +822,7 @@ static void apm_do_busy(void)
#define IDLE_CALC_LIMIT (HZ * 100)
#define IDLE_LEAKY_MAX 16
-static void (*original_pm_idle)(void);
+static void (*original_pm_idle)(void) __read_mostly;
/**
* apm_cpu_idle - cpu idling for APM capable Linux
@@ -1063,7 +1063,8 @@ static int apm_engage_power_management(u_short device, int enable)
static int apm_console_blank(int blank)
{
- int error, i;
+ int error = APM_NOT_ENGAGED; /* silence gcc */
+ int i;
u_short state;
static const u_short dev[3] = { 0x100, 0x1FF, 0x101 };
@@ -1104,7 +1105,8 @@ static int queue_empty(struct apm_user *as)
static apm_event_t get_queued_event(struct apm_user *as)
{
- as->event_tail = (as->event_tail + 1) % APM_MAX_EVENTS;
+ if (++as->event_tail >= APM_MAX_EVENTS)
+ as->event_tail = 0;
return as->events[as->event_tail];
}
@@ -1118,13 +1120,16 @@ static void queue_event(apm_event_t event, struct apm_user *sender)
for (as = user_list; as != NULL; as = as->next) {
if ((as == sender) || (!as->reader))
continue;
- as->event_head = (as->event_head + 1) % APM_MAX_EVENTS;
+ if (++as->event_head >= APM_MAX_EVENTS)
+ as->event_head = 0;
+
if (as->event_head == as->event_tail) {
static int notified;
if (notified++ == 0)
printk(KERN_ERR "apm: an event queue overflowed\n");
- as->event_tail = (as->event_tail + 1) % APM_MAX_EVENTS;
+ if (++as->event_tail >= APM_MAX_EVENTS)
+ as->event_tail = 0;
}
as->events[as->event_head] = event;
if ((!as->suser) || (!as->writer))
@@ -1282,7 +1287,7 @@ static void standby(void)
static apm_event_t get_event(void)
{
int error;
- apm_event_t event;
+ apm_event_t event = APM_NO_EVENTS; /* silence gcc */
apm_eventinfo_t info;
static int notified;
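
The queue changes above replace the modulo advance with a compare-and-reset, which avoids a division (the event queue size is not a power of two) while producing the same sequence of indices. A standalone check of that equivalence, with the queue size assumed to match the driver's:

    /* the two index-advance forms produce identical sequences */
    #include <stdio.h>

    #define APM_MAX_EVENTS 20

    static int advance_mod(int i) { return (i + 1) % APM_MAX_EVENTS; }

    static int advance_cmp(int i)
    {
            if (++i >= APM_MAX_EVENTS)
                    i = 0;
            return i;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < APM_MAX_EVENTS; i++)
                    if (advance_mod(i) != advance_cmp(i))
                            return 1;       /* never taken: the forms agree */
            puts("same sequence, cheaper code");
            return 0;
    }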
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index a06a49075f10..44f2c5f2dda1 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -11,6 +11,8 @@
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
+#include <asm/mtrr.h>
+#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 1a7bdcef1926..05668e3598c0 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -48,12 +48,13 @@ MODULE_LICENSE("GPL");
struct cpufreq_acpi_io {
- struct acpi_processor_performance acpi_data;
+ struct acpi_processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
};
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -104,64 +105,43 @@ acpi_processor_set_performance (
{
u16 port = 0;
u8 bit_width = 0;
- int ret;
- u32 value = 0;
int i = 0;
- struct cpufreq_freqs cpufreq_freqs;
- cpumask_t saved_mask;
+ int ret = 0;
+ u32 value = 0;
int retval;
+ struct acpi_processor_performance *perf;
dprintk("acpi_processor_set_performance\n");
- /*
- * TBD: Use something other than set_cpus_allowed.
- * As set_cpus_allowed is a bit racy,
- * with any other set_cpus_allowed for this process.
- */
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
- if (smp_processor_id() != cpu) {
- return (-EAGAIN);
- }
-
- if (state == data->acpi_data.state) {
+ retval = 0;
+ perf = data->acpi_data;
+ if (state == perf->state) {
if (unlikely(data->resume)) {
dprintk("Called after resume, resetting to P%d\n", state);
data->resume = 0;
} else {
dprintk("Already at target state (P%d)\n", state);
- retval = 0;
- goto migrate_end;
+ return (retval);
}
}
- dprintk("Transitioning from P%d to P%d\n",
- data->acpi_data.state, state);
-
- /* cpufreq frequency struct */
- cpufreq_freqs.cpu = cpu;
- cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
- cpufreq_freqs.new = data->freq_table[state].frequency;
-
- /* notify cpufreq */
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+ dprintk("Transitioning from P%d to P%d\n", perf->state, state);
/*
* First we write the target state's 'control' value to the
* control_register.
*/
- port = data->acpi_data.control_register.address;
- bit_width = data->acpi_data.control_register.bit_width;
- value = (u32) data->acpi_data.states[state].control;
+ port = perf->control_register.address;
+ bit_width = perf->control_register.bit_width;
+ value = (u32) perf->states[state].control;
dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
ret = acpi_processor_write_port(port, bit_width, value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
- retval = ret;
- goto migrate_end;
+ return (ret);
}
/*
@@ -177,48 +157,35 @@ acpi_processor_set_performance (
* before giving up.
*/
- port = data->acpi_data.status_register.address;
- bit_width = data->acpi_data.status_register.bit_width;
+ port = perf->status_register.address;
+ bit_width = perf->status_register.bit_width;
dprintk("Looking for 0x%08x from port 0x%04x\n",
- (u32) data->acpi_data.states[state].status, port);
+ (u32) perf->states[state].status, port);
- for (i=0; i<100; i++) {
+ for (i = 0; i < 100; i++) {
ret = acpi_processor_read_port(port, bit_width, &value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
- retval = ret;
- goto migrate_end;
+ return (ret);
}
- if (value == (u32) data->acpi_data.states[state].status)
+ if (value == (u32) perf->states[state].status)
break;
udelay(10);
}
} else {
- value = (u32) data->acpi_data.states[state].status;
+ value = (u32) perf->states[state].status;
}
- /* notify cpufreq */
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
- if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
- unsigned int tmp = cpufreq_freqs.new;
- cpufreq_freqs.new = cpufreq_freqs.old;
- cpufreq_freqs.old = tmp;
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+ if (unlikely(value != (u32) perf->states[state].status)) {
printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
retval = -ENODEV;
- goto migrate_end;
+ return (retval);
}
dprintk("Transition successful after %d microseconds\n", i * 10);
- data->acpi_data.state = state;
-
- retval = 0;
-migrate_end:
- set_cpus_allowed(current, saved_mask);
+ perf->state = state;
return (retval);
}
@@ -230,8 +197,17 @@ acpi_cpufreq_target (
unsigned int relation)
{
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+ struct acpi_processor_performance *perf;
+ struct cpufreq_freqs freqs;
+ cpumask_t online_policy_cpus;
+ cpumask_t saved_mask;
+ cpumask_t set_mask;
+ cpumask_t covered_cpus;
+ unsigned int cur_state = 0;
unsigned int next_state = 0;
unsigned int result = 0;
+ unsigned int j;
+ unsigned int tmp;
dprintk("acpi_cpufreq_setpolicy\n");
@@ -240,11 +216,95 @@ acpi_cpufreq_target (
target_freq,
relation,
&next_state);
- if (result)
+ if (unlikely(result))
return (result);
- result = acpi_processor_set_performance (data, policy->cpu, next_state);
+ perf = data->acpi_data;
+ cur_state = perf->state;
+ freqs.old = data->freq_table[cur_state].frequency;
+ freqs.new = data->freq_table[next_state].frequency;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* cpufreq holds the hotplug lock, so we are safe from here on */
+ cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+ online_policy_cpus = policy->cpus;
+#endif
+
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ /*
+ * We need to call driver->target() on all or any CPU in
+ * policy->cpus, depending on policy->shared_type.
+ */
+ saved_mask = current->cpus_allowed;
+ cpus_clear(covered_cpus);
+ for_each_cpu_mask(j, online_policy_cpus) {
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on the CPU that wants to change freq

+ */
+ cpus_clear(set_mask);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ cpus_or(set_mask, set_mask, online_policy_cpus);
+ else
+ cpu_set(j, set_mask);
+
+ set_cpus_allowed(current, set_mask);
+ if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ dprintk("couldn't limit to CPUs in this domain\n");
+ result = -EAGAIN;
+ break;
+ }
+
+ result = acpi_processor_set_performance (data, j, next_state);
+ if (result) {
+ result = -EAGAIN;
+ break;
+ }
+
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
+
+ cpu_set(j, covered_cpus);
+ }
+
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ if (unlikely(result)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to online_policy_cpus and
+ * acpi_processor_set_performance() has been called on
+ * covered_cpus. Best-effort undo.
+ */
+
+ if (!cpus_empty(covered_cpus)) {
+ for_each_cpu_mask(j, covered_cpus) {
+ policy->cpu = j;
+ acpi_processor_set_performance (data,
+ j,
+ cur_state);
+ }
+ }
+
+ tmp = freqs.new;
+ freqs.new = freqs.old;
+ freqs.old = tmp;
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ }
+
+ set_cpus_allowed(current, saved_mask);
return (result);
}
@@ -270,30 +330,65 @@ acpi_cpufreq_guess_freq (
struct cpufreq_acpi_io *data,
unsigned int cpu)
{
+ struct acpi_processor_performance *perf = data->acpi_data;
+
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
- unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
+ unsigned long freqn = perf->states[0].core_frequency * 1000;
- for (i=0; i < (data->acpi_data.state_count - 1); i++) {
+ for (i = 0; i < (perf->state_count - 1); i++) {
freq = freqn;
- freqn = data->acpi_data.states[i+1].core_frequency * 1000;
+ freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
- data->acpi_data.state = i;
+ perf->state = i;
return (freq);
}
}
- data->acpi_data.state = data->acpi_data.state_count - 1;
+ perf->state = perf->state_count - 1;
return (freqn);
- } else
+ } else {
/* assume CPU is at P0... */
- data->acpi_data.state = 0;
- return data->acpi_data.states[0].core_frequency * 1000;
-
+ perf->state = 0;
+ return perf->states[0].core_frequency * 1000;
+ }
}
+/*
+ * acpi_cpufreq_early_init - initialize ACPI P-States library
+ *
+ * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
+ * in order to determine correct frequency and voltage pairings. We can
+ * do _PDC and _PSD and find out the processor dependency for the
+ * actual init that will happen later...
+ */
+static int acpi_cpufreq_early_init_acpi(void)
+{
+ struct acpi_processor_performance *data;
+ unsigned int i, j;
+
+ dprintk("acpi_cpufreq_early_init\n");
+
+ for_each_cpu(i) {
+ data = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
+ if (!data) {
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+ return (-ENOMEM);
+ }
+ acpi_perf_data[i] = data;
+ }
+
+ /* Do initialization in ACPI core */
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
+
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -303,41 +398,51 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+ struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
+ if (!acpi_perf_data[cpu])
+ return (-ENODEV);
+
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
return (-ENOMEM);
+ data->acpi_data = acpi_perf_data[cpu];
acpi_io_data[cpu] = data;
- result = acpi_processor_register_performance(&data->acpi_data, cpu);
+ result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
goto err_free;
+ perf = data->acpi_data;
+ policy->cpus = perf->shared_cpu_map;
+ policy->shared_type = perf->shared_type;
+
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
}
/* capability check */
- if (data->acpi_data.state_count <= 1) {
+ if (perf->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
- if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
- (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+
+ if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
+ (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
dprintk("Unsupported address space [%d, %d]\n",
- (u32) (data->acpi_data.control_register.space_id),
- (u32) (data->acpi_data.status_register.space_id));
+ (u32) (perf->control_register.space_id),
+ (u32) (perf->status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
- data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
+ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -345,9 +450,9 @@ acpi_cpufreq_cpu_init (
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
- for (i=0; i<data->acpi_data.state_count; i++) {
- if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
- policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
+ for (i=0; i<perf->state_count; i++) {
+ if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
+ policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
@@ -355,11 +460,11 @@ acpi_cpufreq_cpu_init (
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
/* table init */
- for (i=0; i<=data->acpi_data.state_count; i++)
+ for (i=0; i<=perf->state_count; i++)
{
data->freq_table[i].index = i;
- if (i<data->acpi_data.state_count)
- data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+ if (i<perf->state_count)
+ data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
else
data->freq_table[i].frequency = CPUFREQ_TABLE_END;
}
@@ -374,12 +479,12 @@ acpi_cpufreq_cpu_init (
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
cpu);
- for (i = 0; i < data->acpi_data.state_count; i++)
+ for (i = 0; i < perf->state_count; i++)
dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
- (i == data->acpi_data.state?'*':' '), i,
- (u32) data->acpi_data.states[i].core_frequency,
- (u32) data->acpi_data.states[i].power,
- (u32) data->acpi_data.states[i].transition_latency);
+ (i == perf->state?'*':' '), i,
+ (u32) perf->states[i].core_frequency,
+ (u32) perf->states[i].power,
+ (u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
@@ -394,7 +499,7 @@ acpi_cpufreq_cpu_init (
err_freqfree:
kfree(data->freq_table);
err_unreg:
- acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
@@ -415,7 +520,7 @@ acpi_cpufreq_cpu_exit (
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
- acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
+ acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
kfree(data);
}
@@ -462,7 +567,10 @@ acpi_cpufreq_init (void)
dprintk("acpi_cpufreq_init\n");
- result = cpufreq_register_driver(&acpi_cpufreq_driver);
+ result = acpi_cpufreq_early_init_acpi();
+
+ if (!result)
+ result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
}
@@ -471,10 +579,15 @@ acpi_cpufreq_init (void)
static void __exit
acpi_cpufreq_exit (void)
{
+ unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
+ for_each_cpu(i) {
+ kfree(acpi_perf_data[i]);
+ acpi_perf_data[i] = NULL;
+ }
return;
}
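
acpi_cpufreq_early_init_acpi() above allocates one acpi_processor_performance block per possible CPU and, on the first allocation failure, frees everything it already grabbed before returning -ENOMEM; module exit does the same full sweep. A userspace model of that allocate-all-or-unwind pattern (hypothetical structure and names):

    /* userspace model of the allocate-all-or-unwind pattern */
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS 4

    struct perf_data { int state, state_count; };

    static struct perf_data *perf[NR_CPUS];

    static int early_init(void)
    {
            int i, j;

            for (i = 0; i < NR_CPUS; i++) {
                    perf[i] = calloc(1, sizeof(*perf[i]));
                    if (!perf[i]) {
                            /* undo every earlier allocation before failing */
                            for (j = 0; j < NR_CPUS; j++) {
                                    free(perf[j]);
                                    perf[j] = NULL;
                            }
                            return -1;      /* -ENOMEM in the kernel code */
                    }
            }
            return 0;
    }

    int main(void)
    {
            if (early_init())
                    return 1;
            puts("per-cpu data ready");
            /* module exit mirrors this: free and NULL every slot */
            return 0;
    }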
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index b4277f58f40c..2d6491672559 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -120,7 +120,7 @@ static int pending_bit_stuck(void)
{
u32 lo, hi;
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
return 0;
rdmsr(MSR_FIDVID_STATUS, lo, hi);
@@ -136,7 +136,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
u32 lo, hi;
u32 i = 0;
- if (cpu_family) {
+ if (cpu_family == CPU_HW_PSTATE) {
rdmsr(MSR_PSTATE_STATUS, lo, hi);
i = lo & HW_PSTATE_MASK;
rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
@@ -598,7 +598,7 @@ static void print_basics(struct powernow_k8_data *data)
int j;
for (j = 0; j < data->numps; j++) {
if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) {
- if (cpu_family) {
+ if (cpu_family == CPU_HW_PSTATE) {
printk(KERN_INFO PFX " %d : fid 0x%x gid 0x%x (%d MHz)\n", j, (data->powernow_table[j].index & 0xff00) >> 8,
(data->powernow_table[j].index & 0xff0000) >> 16,
data->powernow_table[j].frequency/1000);
@@ -758,7 +758,7 @@ static int find_psb_table(struct powernow_k8_data *data)
#ifdef CONFIG_X86_POWERNOW_K8_ACPI
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
{
- if (!data->acpi_data.state_count || cpu_family)
+ if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
return;
data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
@@ -801,7 +801,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
goto err_out;
}
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
ret_val = fill_powernow_table_pstate(data, powernow_table);
else
ret_val = fill_powernow_table_fidvid(data, powernow_table);
@@ -885,8 +885,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
u32 vid;
if (data->exttype) {
- fid = data->acpi_data.states[i].status & FID_MASK;
- vid = (data->acpi_data.states[i].status >> VID_SHIFT) & VID_MASK;
+ fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+ vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
} else {
fid = data->acpi_data.states[i].control & FID_MASK;
vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
@@ -1082,7 +1082,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
if (query_current_values_with_pending_wait(data))
goto err_out;
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
dprintk("targ: curr fid 0x%x, did 0x%x\n",
data->currfid, data->currvid);
else {
@@ -1103,7 +1103,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
powernow_k8_acpi_pst_values(data, newstate);
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
ret = transition_frequency_pstate(data, newstate);
else
ret = transition_frequency_fidvid(data, newstate);
@@ -1115,7 +1115,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
}
mutex_unlock(&fidvid_mutex);
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1163,7 +1163,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
* Use the PSB BIOS structure. This is only available on
* an UP version, and is deprecated by AMD.
*/
- if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
+ if (num_online_cpus() != 1) {
printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
kfree(data);
return -ENODEV;
@@ -1197,14 +1197,14 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (query_current_values_with_pending_wait(data))
goto err_out;
- if (!cpu_family)
+ if (cpu_family == CPU_OPTERON)
fidvid_msr_init();
/* run on any CPU again */
set_cpus_allowed(current, oldmask);
pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
pol->cpus = cpumask_of_cpu(pol->cpu);
else
pol->cpus = cpu_core_map[pol->cpu];
@@ -1215,7 +1215,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
+ (3 * (1 << data->irt) * 10)) * 1000;
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1232,7 +1232,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
- if (cpu_family)
+ if (cpu_family == CPU_HW_PSTATE)
dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
data->currfid, data->currdid);
else
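
The repeated change from "if (cpu_family)" to "if (cpu_family == CPU_HW_PSTATE)" above trades truthiness for a named comparison, so adding another family later cannot silently fall into the hardware-P-state path. A small sketch with hypothetical enum values:

    /* sketch of why the named comparison is safer (hypothetical values) */
    #include <stdio.h>

    enum cpu_family { CPU_OPTERON, CPU_HW_PSTATE, CPU_FUTURE };

    static const char *describe(enum cpu_family f)
    {
            /* "if (f)" would lump CPU_FUTURE in with CPU_HW_PSTATE; the named
             * comparison keeps each family on its intended code path */
            if (f == CPU_HW_PSTATE)
                    return "hardware P-state interface";
            return "fid/vid MSR interface";
    }

    int main(void)
    {
            printf("%s\n", describe(CPU_HW_PSTATE));
            printf("%s\n", describe(CPU_OPTERON));
            return 0;
    }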
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index bf8ad9e43da3..0fb2a3001ba5 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -169,7 +169,9 @@ struct powernow_k8_data {
#define MVS_MASK 3
#define VST_MASK 0x7f
#define VID_MASK 0x1f
-#define FID_MASK 0x3f
+#define FID_MASK 0x1f
+#define EXT_VID_MASK 0x3f
+#define EXT_FID_MASK 0x3f
/*
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index ce54ff12c15d..31c3a5baaa7f 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -2,19 +2,15 @@
* cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
* M (part of the Centrino chipset).
*
+ * Since the original Pentium M, most new Intel CPUs support Enhanced
+ * SpeedStep.
+ *
* Despite the "SpeedStep" in the name, this is almost entirely unlike
* traditional SpeedStep.
*
* Modelled on speedstep.c
*
* Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
- *
- * WARNING WARNING WARNING
- *
- * This driver manipulates the PERF_CTL MSR, which is only somewhat
- * documented. While it seems to work on my laptop, it has not been
- * tested anywhere else, and it may not work for you, do strange
- * things or simply crash.
*/
#include <linux/kernel.h>
@@ -36,7 +32,7 @@
#include <asm/cpufeature.h>
#define PFX "speedstep-centrino: "
-#define MAINTAINER "Jeremy Fitzhardinge <jeremy@goop.org>"
+#define MAINTAINER "cpufreq@lists.linux.org.uk"
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
@@ -351,7 +347,36 @@ static unsigned int get_cur_freq(unsigned int cpu)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-static struct acpi_processor_performance p;
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+
+/*
+ * centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
+ * library
+ *
+ * Before doing the actual init, we need to do _PSD related setup whenever
+ * supported by the BIOS. These are handled by this early_init routine.
+ */
+static int centrino_cpu_early_init_acpi(void)
+{
+ unsigned int i, j;
+ struct acpi_processor_performance *data;
+
+ for_each_cpu(i) {
+ data = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
+ if (!data) {
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+ return (-ENOMEM);
+ }
+ acpi_perf_data[i] = data;
+ }
+
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
/*
* centrino_cpu_init_acpi - register with ACPI P-States library
@@ -365,46 +390,51 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
+ struct acpi_processor_performance *p;
+
+ p = acpi_perf_data[cpu];
/* register with ACPI core */
- if (acpi_processor_register_performance(&p, cpu)) {
- dprintk("obtaining ACPI data failed\n");
+ if (acpi_processor_register_performance(p, cpu)) {
+ dprintk(PFX "obtaining ACPI data failed\n");
return -EIO;
}
+ policy->cpus = p->shared_cpu_map;
+ policy->shared_type = p->shared_type;
/* verify the acpi_data */
- if (p.state_count <= 1) {
+ if (p->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
- if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
- (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
dprintk("Invalid control/status registers (%x - %x)\n",
- p.control_register.space_id, p.status_register.space_id);
+ p->control_register.space_id, p->status_register.space_id);
result = -EIO;
goto err_unreg;
}
- for (i=0; i<p.state_count; i++) {
- if (p.states[i].control != p.states[i].status) {
+ for (i=0; i<p->state_count; i++) {
+ if (p->states[i].control != p->states[i].status) {
dprintk("Different control (%llu) and status values (%llu)\n",
- p.states[i].control, p.states[i].status);
+ p->states[i].control, p->states[i].status);
result = -EINVAL;
goto err_unreg;
}
- if (!p.states[i].core_frequency) {
+ if (!p->states[i].core_frequency) {
dprintk("Zero core frequency for state %u\n", i);
result = -EINVAL;
goto err_unreg;
}
- if (p.states[i].core_frequency > p.states[0].core_frequency) {
+ if (p->states[i].core_frequency > p->states[0].core_frequency) {
dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
- p.states[i].core_frequency, p.states[0].core_frequency);
- p.states[i].core_frequency = 0;
+ p->states[i].core_frequency, p->states[0].core_frequency);
+ p->states[i].core_frequency = 0;
continue;
}
}
@@ -416,26 +446,26 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
centrino_model[cpu]->model_name=NULL;
- centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
+ centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
- (p.state_count + 1), GFP_KERNEL);
+ (p->state_count + 1), GFP_KERNEL);
if (!centrino_model[cpu]->op_points) {
result = -ENOMEM;
goto err_kfree;
}
- for (i=0; i<p.state_count; i++) {
- centrino_model[cpu]->op_points[i].index = p.states[i].control;
- centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
+ for (i=0; i<p->state_count; i++) {
+ centrino_model[cpu]->op_points[i].index = p->states[i].control;
+ centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
dprintk("adding state %i with frequency %u and control value %04x\n",
i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
}
- centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
+ centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
cur_freq = get_cur_freq(cpu);
- for (i=0; i<p.state_count; i++) {
- if (!p.states[i].core_frequency) {
+ for (i=0; i<p->state_count; i++) {
+ if (!p->states[i].core_frequency) {
dprintk("skipping state %u\n", i);
centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
@@ -451,7 +481,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
- p.state = i;
+ p->state = i;
}
/* notify BIOS that we exist */
@@ -464,12 +494,13 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
err_kfree:
kfree(centrino_model[cpu]);
err_unreg:
- acpi_processor_unregister_performance(&p, cpu);
- dprintk("invalid ACPI data\n");
+ acpi_processor_unregister_performance(p, cpu);
+ dprintk(PFX "invalid ACPI data\n");
return (result);
}
#else
static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_early_init_acpi(void) { return 0; }
#endif
static int centrino_cpu_init(struct cpufreq_policy *policy)
@@ -555,10 +586,15 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
if (!centrino_model[cpu]->model_name) {
- dprintk("unregistering and freeing ACPI data\n");
- acpi_processor_unregister_performance(&p, cpu);
- kfree(centrino_model[cpu]->op_points);
- kfree(centrino_model[cpu]);
+ static struct acpi_processor_performance *p;
+
+ if (acpi_perf_data[cpu]) {
+ p = acpi_perf_data[cpu];
+ dprintk("unregistering and freeing ACPI data\n");
+ acpi_processor_unregister_performance(p, cpu);
+ kfree(centrino_model[cpu]->op_points);
+ kfree(centrino_model[cpu]);
+ }
}
#endif
@@ -592,63 +628,128 @@ static int centrino_target (struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int newstate = 0;
- unsigned int msr, oldmsr, h, cpu = policy->cpu;
+ unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
+ cpumask_t online_policy_cpus;
cpumask_t saved_mask;
- int retval;
+ cpumask_t set_mask;
+ cpumask_t covered_cpus;
+ int retval = 0;
+ unsigned int j, k, first_cpu, tmp;
- if (centrino_model[cpu] == NULL)
+ if (unlikely(centrino_model[cpu] == NULL))
return -ENODEV;
- /*
- * Support for SMP systems.
- * Make sure we are running on the CPU that wants to change frequency
- */
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, policy->cpus);
- if (!cpu_isset(smp_processor_id(), policy->cpus)) {
- dprintk("couldn't limit to CPUs in this domain\n");
- return(-EAGAIN);
+ if (unlikely(cpufreq_frequency_table_target(policy,
+ centrino_model[cpu]->op_points,
+ target_freq,
+ relation,
+ &newstate))) {
+ return -EINVAL;
}
- if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
- relation, &newstate)) {
- retval = -EINVAL;
- goto migrate_end;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ /* cpufreq holds the hotplug lock, so we are safe from here on */
+ cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+ online_policy_cpus = policy->cpus;
+#endif
- msr = centrino_model[cpu]->op_points[newstate].index;
- rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ saved_mask = current->cpus_allowed;
+ first_cpu = 1;
+ cpus_clear(covered_cpus);
+ for_each_cpu_mask(j, online_policy_cpus) {
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on the CPU that wants to change freq
+ */
+ cpus_clear(set_mask);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ cpus_or(set_mask, set_mask, online_policy_cpus);
+ else
+ cpu_set(j, set_mask);
+
+ set_cpus_allowed(current, set_mask);
+ if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ dprintk("couldn't limit to CPUs in this domain\n");
+ retval = -EAGAIN;
+ if (first_cpu) {
+ /* We haven't started the transition yet. */
+ goto migrate_end;
+ }
+ break;
+ }
- if (msr == (oldmsr & 0xffff)) {
- retval = 0;
- dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
- goto migrate_end;
- }
+ msr = centrino_model[cpu]->op_points[newstate].index;
+
+ if (first_cpu) {
+ rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (msr == (oldmsr & 0xffff)) {
+ dprintk("no change needed - msr was and needs "
+ "to be %x\n", oldmsr);
+ retval = 0;
+ goto migrate_end;
+ }
+
+ freqs.old = extract_clock(oldmsr, cpu, 0);
+ freqs.new = extract_clock(msr, cpu, 0);
+
+ dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
+ target_freq, freqs.old, freqs.new, msr);
+
+ for_each_cpu_mask(k, online_policy_cpus) {
+ freqs.cpu = k;
+ cpufreq_notify_transition(&freqs,
+ CPUFREQ_PRECHANGE);
+ }
+
+ first_cpu = 0;
+ /* all but 16 LSB are reserved, treat them with care */
+ oldmsr &= ~0xffff;
+ msr &= 0xffff;
+ oldmsr |= msr;
+ }
- freqs.cpu = cpu;
- freqs.old = extract_clock(oldmsr, cpu, 0);
- freqs.new = extract_clock(msr, cpu, 0);
+ wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
- dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
- target_freq, freqs.old, freqs.new, msr);
+ cpu_set(j, covered_cpus);
+ }
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ for_each_cpu_mask(k, online_policy_cpus) {
+ freqs.cpu = k;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
- /* all but 16 LSB are "reserved", so treat them with
- care */
- oldmsr &= ~0xffff;
- msr &= 0xffff;
- oldmsr |= msr;
+ if (unlikely(retval)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to policy->cpus and
+ * MSRs have already been written on covered_cpus.
+ * Best-effort undo.
+ */
- wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (!cpus_empty(covered_cpus)) {
+ for_each_cpu_mask(j, covered_cpus) {
+ set_cpus_allowed(current, cpumask_of_cpu(j));
+ wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ }
+ }
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ tmp = freqs.new;
+ freqs.new = freqs.old;
+ freqs.old = tmp;
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ }
- retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
- return (retval);
+ return 0;
}
static struct freq_attr* centrino_attr[] = {
@@ -690,12 +791,25 @@ static int __init centrino_init(void)
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
+ centrino_cpu_early_init_acpi();
+
return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+ unsigned int j;
+#endif
+
cpufreq_unregister_driver(&centrino_driver);
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+#endif
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 00f2e058797c..fc32c8028e24 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -184,7 +184,7 @@ static void __init geode_configure(void)
#ifdef CONFIG_PCI
-static struct pci_device_id cyrix_55x0[] = {
+static struct pci_device_id __initdata cyrix_55x0[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510) },
{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520) },
{ },
@@ -272,14 +272,15 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
isa_dma_bridge_buggy = 2;
-#endif
- c->x86_cache_size=16; /* Yep 16K integrated cache thats it */
-
+
+
/*
* The 5510/5520 companion chips have a funky PIT.
*/
if (pci_dev_present(cyrix_55x0))
pit_latch_buggy = 1;
+#endif
+ c->x86_cache_size=16; /* Yep 16K integrated cache thats it */
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index f94cdb7aca50..a19fcb262dbb 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -52,7 +52,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
/* VIA/Cyrix/Centaur-defined */
NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
index d75524758daf..c4351972d9af 100644
--- a/arch/i386/kernel/i387.c
+++ b/arch/i386/kernel/i387.c
@@ -25,7 +25,7 @@
#define HAVE_HWFP 1
#endif
-static unsigned long mxcsr_feature_mask = 0xffffffff;
+static unsigned long mxcsr_feature_mask __read_mostly = 0xffffffff;
void mxcsr_feature_mask_init(void)
{
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 323ef8ab3244..b7636b96e104 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -271,8 +271,8 @@ static int i8259A_shutdown(struct sys_device *dev)
* the kernel initialization code can get it
* out of.
*/
- outb(0xff, 0x21); /* mask all of 8259A-1 */
- outb(0xff, 0xA1); /* mask all of 8259A-1 */
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
return 0;
}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index d70f2ade5cde..a62df3e764c5 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -267,7 +267,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
# include <linux/slab.h> /* kmalloc() */
# include <linux/timer.h> /* time_after() */
-# ifdef CONFIG_BALANCED_IRQ_DEBUG
+#ifdef CONFIG_BALANCED_IRQ_DEBUG
# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
# define Dprintk(x...) do { TDprintk(x); } while (0)
# else
@@ -275,10 +275,15 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
# define Dprintk(x...)
# endif
-
#define IRQBALANCE_CHECK_ARCH -999
-static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
-static int physical_balance = 0;
+#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
+#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
+#define BALANCED_IRQ_MORE_DELTA (HZ/10)
+#define BALANCED_IRQ_LESS_DELTA (HZ)
+
+static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
+static int physical_balance __read_mostly;
+static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
static struct irq_cpu_info {
unsigned long * last_irq;
@@ -297,12 +302,14 @@ static struct irq_cpu_info {
#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
-#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
-#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
-#define BALANCED_IRQ_MORE_DELTA (HZ/10)
-#define BALANCED_IRQ_LESS_DELTA (HZ)
+static cpumask_t balance_irq_affinity[NR_IRQS] = {
+ [0 ... NR_IRQS-1] = CPU_MASK_ALL
+};
-static long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
+void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
+{
+ balance_irq_affinity[irq] = mask;
+}
static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
unsigned long now, int direction)
@@ -340,7 +347,7 @@ static inline void balance_irq(int cpu, int irq)
if (irqbalance_disabled)
return;
- cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
+ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
new_cpu = move(cpu, allowed_mask, now, 1);
if (cpu != new_cpu) {
set_pending_irq(irq, cpumask_of_cpu(new_cpu));
@@ -529,7 +536,9 @@ tryanotherirq:
}
}
- cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
+ cpus_and(allowed_mask,
+ cpu_online_map,
+ balance_irq_affinity[selected_irq]);
target_cpu_mask = cpumask_of_cpu(min_loaded);
cpus_and(tmp, target_cpu_mask, allowed_mask);
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index f3a9c78c4a24..248e922ee13a 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -42,8 +42,8 @@ union irq_ctx {
u32 stack[THREAD_SIZE/sizeof(u32)];
};
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
/*
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 38806f427849..395a9a6dff88 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -607,7 +607,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
- if (args->regs && user_mode(args->regs))
+ if (args->regs && user_mode_vm(args->regs))
return ret;
switch (val) {
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index e7c138f66c5a..0a865889b2a9 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -91,7 +91,10 @@ MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
MODULE_LICENSE("GPL");
-#define MICROCODE_VERSION "1.14"
+static int verbose;
+module_param(verbose, int, 0644);
+
+#define MICROCODE_VERSION "1.14a"
#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
@@ -122,14 +125,15 @@ static unsigned int user_buffer_size; /* it's size */
typedef enum mc_error_code {
MC_SUCCESS = 0,
- MC_NOTFOUND = 1,
- MC_MARKED = 2,
- MC_ALLOCATED = 3,
+ MC_IGNORED = 1,
+ MC_NOTFOUND = 2,
+ MC_MARKED = 3,
+ MC_ALLOCATED = 4,
} mc_error_code_t;
static struct ucode_cpu_info {
unsigned int sig;
- unsigned int pf;
+ unsigned int pf, orig_pf;
unsigned int rev;
unsigned int cksum;
mc_error_code_t err;
@@ -164,6 +168,7 @@ static void collect_cpu_info (void *unused)
rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
uci->pf = 1 << ((val[1] >> 18) & 7);
}
+ uci->orig_pf = uci->pf;
}
wrmsr(MSR_IA32_UCODE_REV, 0, 0);
@@ -197,21 +202,34 @@ static inline void mark_microcode_update (int cpu_num, microcode_header_t *mc_he
pr_debug(" Checksum 0x%x\n", cksum);
if (mc_header->rev < uci->rev) {
- printk(KERN_ERR "microcode: CPU%d not 'upgrading' to earlier revision"
- " 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
- goto out;
+ if (uci->err == MC_NOTFOUND) {
+ uci->err = MC_IGNORED;
+ uci->cksum = mc_header->rev;
+ } else if (uci->err == MC_IGNORED && uci->cksum < mc_header->rev)
+ uci->cksum = mc_header->rev;
} else if (mc_header->rev == uci->rev) {
- /* notify the caller of success on this cpu */
- uci->err = MC_SUCCESS;
- goto out;
+ if (uci->err < MC_MARKED) {
+ /* notify the caller of success on this cpu */
+ uci->err = MC_SUCCESS;
+ }
+ } else if (uci->err != MC_ALLOCATED || mc_header->rev > uci->mc->hdr.rev) {
+ pr_debug("microcode: CPU%d found a matching microcode update with "
+ " revision 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
+ uci->cksum = cksum;
+ uci->pf = pf; /* keep the original mc pf for cksum calculation */
+ uci->err = MC_MARKED; /* found the match */
+ for_each_online_cpu(cpu_num) {
+ if (ucode_cpu_info + cpu_num != uci
+ && ucode_cpu_info[cpu_num].mc == uci->mc) {
+ uci->mc = NULL;
+ break;
+ }
+ }
+ if (uci->mc != NULL) {
+ vfree(uci->mc);
+ uci->mc = NULL;
+ }
}
-
- pr_debug("microcode: CPU%d found a matching microcode update with "
- " revision 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
- uci->cksum = cksum;
- uci->pf = pf; /* keep the original mc pf for cksum calculation */
- uci->err = MC_MARKED; /* found the match */
-out:
return;
}
@@ -253,10 +271,8 @@ static int find_matching_ucodes (void)
for_each_online_cpu(cpu_num) {
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
- if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/
- continue;
- if (sigmatch(mc_header.sig, uci->sig, mc_header.pf, uci->pf))
+ if (sigmatch(mc_header.sig, uci->sig, mc_header.pf, uci->orig_pf))
mark_microcode_update(cpu_num, &mc_header, mc_header.sig, mc_header.pf, mc_header.cksum);
}
@@ -295,9 +311,8 @@ static int find_matching_ucodes (void)
}
for_each_online_cpu(cpu_num) {
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
- if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/
- continue;
- if (sigmatch(ext_sig.sig, uci->sig, ext_sig.pf, uci->pf)) {
+
+ if (sigmatch(ext_sig.sig, uci->sig, ext_sig.pf, uci->orig_pf)) {
mark_microcode_update(cpu_num, &mc_header, ext_sig.sig, ext_sig.pf, ext_sig.cksum);
}
}
@@ -368,6 +383,13 @@ static void do_update_one (void * unused)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
if (uci->mc == NULL) {
+ if (verbose) {
+ if (uci->err == MC_SUCCESS)
+ printk(KERN_INFO "microcode: CPU%d already at revision 0x%x\n",
+ cpu_num, uci->rev);
+ else
+ printk(KERN_INFO "microcode: No new microcode data for CPU%d\n", cpu_num);
+ }
return;
}
@@ -426,6 +448,9 @@ out_free:
ucode_cpu_info[j].mc = NULL;
}
}
+ if (ucode_cpu_info[i].err == MC_IGNORED && verbose)
+ printk(KERN_WARNING "microcode: CPU%d not 'upgrading' to earlier revision"
+ " 0x%x (current=0x%x)\n", i, ucode_cpu_info[i].cksum, ucode_cpu_info[i].rev);
}
out:
return error;
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index dd6b0e3386ce..e6023970aa40 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -48,6 +48,7 @@
#include <linux/crash_dump.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
+#include <linux/suspend.h>
#include <video/edid.h>
@@ -1434,6 +1435,111 @@ static void set_mca_bus(int x)
static void set_mca_bus(int x) { }
#endif
+#ifdef CONFIG_SOFTWARE_SUSPEND
+static void __init mark_nosave_page_range(unsigned long start, unsigned long end)
+{
+ struct page *page;
+ while (start <= end) {
+ page = pfn_to_page(start);
+ SetPageNosave(page);
+ start++;
+ }
+}
+
+static void __init e820_nosave_reserved_pages(void)
+{
+ int i;
+ unsigned long r_start = 0, r_end = 0;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = PFN_DOWN(ei->addr);
+ end = PFN_UP(ei->addr + ei->size);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RESERVED)
+ continue;
+ r_end = start;
+ /*
+ * Highmem 'Reserved' pages are marked as reserved; swsusp
+ * will not save/restore them, so we ignore these pages here.
+ */
+ if (r_end > max_low_pfn)
+ r_end = max_low_pfn;
+ if (r_end > r_start)
+ mark_nosave_page_range(r_start, r_end-1);
+ if (r_end >= max_low_pfn)
+ break;
+ r_start = end;
+ }
+}
+
+static void __init e820_save_acpi_pages(void)
+{
+ int i;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = ei->addr;
+ end = ei->addr + ei->size;
+ if (start >= end)
+ continue;
+ if (ei->type != E820_ACPI && ei->type != E820_NVS)
+ continue;
+ /*
+ * If the region is below max_low_pfn, it will be
+ * saved/restored by swsusp following the 'RAM' type.
+ */
+ if (start < (max_low_pfn << PAGE_SHIFT))
+ start = max_low_pfn << PAGE_SHIFT;
+ /*
+ * Highmem pages (ACPI NVS/Data) are reserved, but swsusp
+ * highmem save/restore will not save/restore them. We mark
+ * them as arch-saveable pages here.
+ */
+ if (end > start)
+ swsusp_add_arch_pages(start, end);
+ }
+}
+
+extern char __start_rodata, __end_rodata;
+/*
+ * BIOS reserved region/hole - no save/restore
+ * ACPI NVS - save/restore
+ * ACPI Data - this is a little tricky: the memory could be used by the OS after
+ * it reads the tables from the region, but saving/restoring it has no side
+ * effects anyway, and Linux runtime module load/unload might use it.
+ * kernel rodata - no save/restore (kernel rodata isn't changed)
+ */
+static int __init mark_nosave_pages(void)
+{
+ unsigned long pfn_start, pfn_end;
+
+ /* FIXME: provide a version for efi BIOS */
+ if (efi_enabled)
+ return 0;
+ /* BIOS reserved regions & holes */
+ e820_nosave_reserved_pages();
+
+ /* kernel rodata */
+ pfn_start = PFN_UP(virt_to_phys(&__start_rodata));
+ pfn_end = PFN_DOWN(virt_to_phys(&__end_rodata));
+ mark_nosave_page_range(pfn_start, pfn_end-1);
+
+ /* record ACPI Data/NVS as saveable */
+ e820_save_acpi_pages();
+
+ return 0;
+}
+core_initcall(mark_nosave_pages);
+#endif
+
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
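
mark_nosave_pages() above walks the sorted e820 map and flags the gaps between usable regions so swsusp skips them on suspend. A rough user-space sketch of the same walk — hypothetical region type and callback, not the kernel API — follows:

#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; };	/* usable RAM, sorted */

/* Flag every pfn that falls between consecutive usable regions as "nosave". */
static void mark_nosave_holes(const struct region *map, int n,
			      void (*mark)(unsigned long, unsigned long))
{
	unsigned long prev_end = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (map[i].start_pfn > prev_end)
			mark(prev_end, map[i].start_pfn - 1);	/* hole/reserved */
		if (map[i].end_pfn > prev_end)
			prev_end = map[i].end_pfn;
	}
}

static void print_range(unsigned long s, unsigned long e)
{
	printf("nosave: pfn %lu-%lu\n", s, e);
}

int main(void)
{
	struct region map[] = { { 0, 160 }, { 256, 1024 }, { 1100, 2048 } };

	mark_nosave_holes(map, (int)(sizeof(map) / sizeof(map[0])), print_range);
	return 0;
}
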
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index 52b3ed5d2cb5..989c85255dbe 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -39,7 +39,6 @@
#define NODE_ARRAY_OFFSET(x) ((x) % 8) /* 8 bits/char */
#define BMAP_SET(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit))
#define BMAP_TEST(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit)))
-#define MAX_PXM_DOMAINS 256 /* 1 byte and no promises about values */
/* bitmap length; _PXM is at most 255 */
#define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8)
static u8 pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */
@@ -213,19 +212,11 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
node_end_pfn[nid] = memory_chunk->end_pfn;
}
-static u8 pxm_to_nid_map[MAX_PXM_DOMAINS];/* _PXM to logical node ID map */
-
-int pxm_to_node(int pxm)
-{
- return pxm_to_nid_map[pxm];
-}
-
/* Parse the ACPI Static Resource Affinity Table */
static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
{
u8 *start, *end, *p;
int i, j, nid;
- u8 nid_to_pxm_map[MAX_NUMNODES];/* logical node ID to _PXM map */
start = (u8 *)(&(sratp->reserved) + 1); /* skip header */
p = start;
@@ -235,10 +226,6 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
memset(node_memory_chunk, 0, sizeof(node_memory_chunk));
memset(zholes_size, 0, sizeof(zholes_size));
- /* -1 in these maps means not available */
- memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
- memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
-
num_memory_chunks = 0;
while (p < end) {
switch (*p) {
@@ -278,9 +265,7 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
nodes_clear(node_online_map);
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (BMAP_TEST(pxm_bitmap, i)) {
- nid = num_online_nodes();
- pxm_to_nid_map[i] = nid;
- nid_to_pxm_map[nid] = i;
+ int nid = acpi_map_pxm_to_node(i);
node_set_online(nid);
}
}
@@ -288,7 +273,7 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
/* set cnode id in memory chunk structure */
for (i = 0; i < num_memory_chunks; i++)
- node_memory_chunk[i].nid = pxm_to_nid_map[node_memory_chunk[i].pxm];
+ node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);
printk("pxm bitmap: ");
for (i = 0; i < sizeof(pxm_bitmap); i++) {
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index af56987f69b0..dd63d4775398 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -316,3 +316,4 @@ ENTRY(sys_call_table)
.long sys_sync_file_range
.long sys_tee /* 315 */
.long sys_vmsplice
+ .long sys_move_pages
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0e498369f35e..dcc14477af1f 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -149,6 +149,12 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
while (valid_stack_ptr(tinfo, (void *)ebp)) {
addr = *(unsigned long *)(ebp + 4);
printed = print_addr_and_symbol(addr, log_lvl, printed);
+ /*
+ * break out of recursive entries (such as
+ * end_of_stack_stop_unwind_function):
+ */
+ if (ebp == *(unsigned long *)ebp)
+ break;
ebp = *(unsigned long *)ebp;
}
#else
@@ -268,8 +274,9 @@ void show_registers(struct pt_regs *regs)
regs->esi, regs->edi, regs->ebp, esp);
printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
regs->xds & 0xffff, regs->xes & 0xffff, ss);
- printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
- current->comm, current->pid, current_thread_info(), current);
+ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
+ TASK_COMM_LEN, current->comm, current->pid,
+ current_thread_info(), current, current->thread_info);
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 4cf981d70f45..6979297ce278 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -425,15 +425,121 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
: "eax", "edx", "memory");
return size;
}
+
+/*
+ * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
+ * hyoshiok@miraclelinux.com
+ */
+
+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size)
+{
+ int d0, d1;
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+ "1: movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+ " cmpl $63, %0\n"
+ " ja 0b\n"
+ " sfence \n"
+ "5: movl %0, %%eax\n"
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+ "6: rep; movsl\n"
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: pushl %0\n"
+ " pushl %%eax\n"
+ " xorl %%eax,%%eax\n"
+ " rep; stosb\n"
+ " popl %%eax\n"
+ " popl %0\n"
+ " jmp 8b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,16b\n"
+ " .long 1b,16b\n"
+ " .long 2b,16b\n"
+ " .long 21b,16b\n"
+ " .long 3b,16b\n"
+ " .long 31b,16b\n"
+ " .long 4b,16b\n"
+ " .long 41b,16b\n"
+ " .long 10b,16b\n"
+ " .long 51b,16b\n"
+ " .long 11b,16b\n"
+ " .long 61b,16b\n"
+ " .long 12b,16b\n"
+ " .long 71b,16b\n"
+ " .long 13b,16b\n"
+ " .long 81b,16b\n"
+ " .long 14b,16b\n"
+ " .long 91b,16b\n"
+ " .long 6b,9b\n"
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+ : "1"(to), "2"(from), "0"(size)
+ : "eax", "edx", "memory");
+ return size;
+}
+
#else
+
/*
 * Leave these declared but undefined. There should not be any references to
 * them
*/
-unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
-unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size);
+unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+ unsigned long size);
+unsigned long __copy_user_intel(void __user *to, const void *from,
+ unsigned long size);
+unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */
@@ -515,8 +621,8 @@ do { \
: "memory"); \
} while (0)
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
+unsigned long __copy_to_user_ll(void __user *to, const void *from,
+ unsigned long n)
{
BUG_ON((long) n < 0);
#ifndef CONFIG_X86_WP_WORKS_OK
@@ -576,8 +682,8 @@ survive:
}
EXPORT_SYMBOL(__copy_to_user_ll);
-unsigned long
-__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
+unsigned long __copy_from_user_ll(void *to, const void __user *from,
+ unsigned long n)
{
BUG_ON((long)n < 0);
if (movsl_is_ok(to, from, n))
@@ -588,6 +694,21 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
}
EXPORT_SYMBOL(__copy_from_user_ll);
+unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
+ unsigned long n)
+{
+ BUG_ON((long)n < 0);
+#ifdef CONFIG_X86_INTEL_USERCOPY
+ if ( n > 64 && cpu_has_xmm2)
+ n = __copy_user_zeroing_intel_nocache(to, from, n);
+ else
+ __copy_user_zeroing(to, from, n);
+#else
+ __copy_user_zeroing(to, from, n);
+#endif
+ return n;
+}
+
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
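
The movnti-based __copy_user_zeroing_intel_nocache() above streams data past the cache for large user copies (taken when n > 64 and the CPU has SSE2). The same non-temporal store idea expressed with SSE2 intrinsics in plain C, as an illustration only (requires -msse2 on 32-bit builds; not the kernel's inline asm):

#include <emmintrin.h>	/* SSE2 intrinsics: _mm_stream_si32, _mm_sfence */
#include <stdio.h>
#include <string.h>

/* Copy n bytes: non-temporal 32-bit stores for the bulk, memcpy for the tail. */
static void copy_nocache(void *dst, const void *src, size_t n)
{
	int *d = dst;
	const int *s = src;
	size_t i, words = n / sizeof(int);

	for (i = 0; i < words; i++)
		_mm_stream_si32(&d[i], s[i]);	/* store bypasses the caches */
	_mm_sfence();				/* order the streaming stores */
	memcpy((char *)dst + words * sizeof(int),
	       (const char *)src + words * sizeof(int), n % sizeof(int));
}

int main(void)
{
	int src[256], dst[256];

	memset(src, 0xab, sizeof(src));
	copy_nocache(dst, src, sizeof(src));
	printf("match: %d\n", memcmp(dst, src, sizeof(src)) == 0);
	return 0;
}
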
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 7f0fcf219a26..bd6fe96cc16d 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -77,12 +77,15 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
unsigned seg = regs->xcs & 0xffff;
u32 seg_ar, seg_limit, base, *desc;
+ /* Unlikely, but must come before segment checks. */
+ if (unlikely(regs->eflags & VM_MASK)) {
+ base = seg << 4;
+ *eip_limit = base + 0xffff;
+ return base + (eip & 0xffff);
+ }
+
/* The standard kernel/user address space limit. */
*eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;
-
- /* Unlikely, but must come before segment checks. */
- if (unlikely((regs->eflags & VM_MASK) != 0))
- return eip + (seg << 4);
/* By far the most common cases. */
if (likely(seg == __USER_CS || seg == __KERNEL_CS))
@@ -380,12 +383,12 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area;
if (error_code & 4) {
/*
- * accessing the stack below %esp is always a bug.
- * The "+ 32" is there due to some instructions (like
- * pusha) doing post-decrement on the stack and that
- * doesn't show up until later..
+ * Accessing the stack below %esp is always a bug.
+ * The large cushion allows instructions like enter
+ * and pusha to work. ("enter $65535,$31" pushes
+ * 32 pointers and then decrements %esp by 65535.)
*/
- if (address + 32 < regs->esp)
+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
goto bad_area;
}
if (expand_stack(vma, address))
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 3df1371d4520..bf19513f0cea 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -29,6 +29,7 @@
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
+#include <linux/cpumask.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -384,7 +385,7 @@ static void __init pagetable_init (void)
#endif
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
* Swap suspend & friends need this for resume because things like the intel-agp
* driver might have split up a kernel 4MB mapping.
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 92c3d9f0e731..0887b34bc59b 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -209,19 +209,19 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
}
void global_flush_tlb(void)
-{
- LIST_HEAD(l);
+{
+ struct list_head l;
struct page *pg, *next;
BUG_ON(irqs_disabled());
spin_lock_irq(&cpa_lock);
- list_splice_init(&df_list, &l);
+ list_replace_init(&df_list, &l);
spin_unlock_irq(&cpa_lock);
flush_map();
list_for_each_entry_safe(pg, next, &l, lru)
__free_page(pg);
-}
+}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 06dab00aaadc..8ce69508f3c7 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -198,14 +198,14 @@ static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigne
*/
static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
- static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
+ static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
return irqmap[read_config_nybble(router, 0x48, pirq-1)];
}
static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
+ static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
unsigned int val = irqmap[irq];
if (val) {
@@ -256,13 +256,13 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
*/
static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
- static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
}
static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
return 1;
}
@@ -274,13 +274,13 @@ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq
*/
static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
- static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
return read_config_nybble(router,0x43, pirqmap[pirq-1]);
}
static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
return 1;
}
@@ -505,7 +505,7 @@ static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
- static struct pci_device_id pirq_440gx[] = {
+ static struct pci_device_id __initdata pirq_440gx[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
{ },
@@ -880,6 +880,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
DBG(" -> got IRQ %d\n", irq);
msg = "Found";
+ eisa_set_level_irq(irq);
} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
DBG(" -> assigning IRQ %d", newirq);
if (r->set(pirq_router_dev, dev, pirq, newirq)) {
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 79b2370c7fac..e6517915fe3e 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -10,6 +10,8 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/suspend.h>
+#include <asm/mtrr.h>
+#include <asm/mce.h>
static struct saved_context saved_context;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 0f3076a820c3..18318749884b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -77,6 +77,7 @@ choice
config IA64_GENERIC
bool "generic"
select ACPI
+ select PCI
select NUMA
select ACPI_NUMA
help
@@ -273,7 +274,6 @@ config HOTPLUG_CPU
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
- default off
help
Improves the CPU scheduler's decision making when dealing with
Intel IA64 chips with MultiThreading at a cost of slightly increased
@@ -449,6 +449,8 @@ config PCI_DOMAINS
bool
default PCI
+source "drivers/pci/pcie/Kconfig"
+
source "drivers/pci/Kconfig"
source "drivers/pci/hotplug/Kconfig"
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 80ea7506fa1a..21033ed83307 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -71,6 +71,8 @@ all: compressed unwcheck
compressed: vmlinux.gz
+vmlinuz: vmlinux.gz
+
vmlinux.gz: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bdccd0b1eb60..5825ddee58d6 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1958,7 +1958,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
if (pxm < 0)
return;
- node = pxm_to_nid_map[pxm];
+ node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node))
return;
@@ -1999,7 +1999,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- ACPI_MEM_FREE(dev_info);
+ kfree(dev_info);
/*
* default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 58c93a30348c..ca16d9556bde 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -68,8 +68,6 @@ EXPORT_SYMBOL(pm_power_off);
unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;
-static unsigned int __initdata acpi_madt_rev;
-
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
@@ -243,6 +241,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
return iosapic_init(iosapic->address, iosapic->global_irq_base);
}
+static unsigned int __initdata acpi_madt_rev;
+
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
const unsigned long end)
@@ -415,9 +415,6 @@ static int __initdata srat_num_cpus; /* number of cpus */
static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
-/* maps to convert between proximity domain and logical node ID */
-int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
-int __initdata nid_to_pxm_map[MAX_NUMNODES];
static struct acpi_table_slit __initdata *slit_table;
static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
@@ -533,22 +530,17 @@ void __init acpi_numa_arch_fixup(void)
* MCD - This can probably be dropped now. No need for pxm ID to node ID
* mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
*/
- /* calculate total number of nodes in system from PXM bitmap */
- memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
- memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
nodes_clear(node_online_map);
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (pxm_bit_test(i)) {
- int nid = num_online_nodes();
- pxm_to_nid_map[i] = nid;
- nid_to_pxm_map[nid] = i;
+ int nid = acpi_map_pxm_to_node(i);
node_set_online(nid);
}
}
/* set logical node id in memory chunk structure */
for (i = 0; i < num_node_memblks; i++)
- node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
+ node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
/* assign memory bank numbers for each chunk on each node */
for_each_online_node(i) {
@@ -562,7 +554,7 @@ void __init acpi_numa_arch_fixup(void)
/* set logical node id in cpu structure */
for (i = 0; i < srat_num_cpus; i++)
- node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
+ node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
printk(KERN_INFO "Number of logical nodes in system = %d\n",
num_online_nodes());
@@ -575,11 +567,11 @@ void __init acpi_numa_arch_fixup(void)
for (i = 0; i < slit_table->localities; i++) {
if (!pxm_bit_test(i))
continue;
- node_from = pxm_to_nid_map[i];
+ node_from = pxm_to_node(i);
for (j = 0; j < slit_table->localities; j++) {
if (!pxm_bit_test(j))
continue;
- node_to = pxm_to_nid_map[j];
+ node_to = pxm_to_node(j);
node_distance(node_from, node_to) =
slit_table->entry[i * slit_table->localities + j];
}
@@ -626,7 +618,7 @@ EXPORT_SYMBOL(acpi_unregister_gsi);
static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_header *fadt_header;
- struct fadt_descriptor_rev2 *fadt;
+ struct fadt_descriptor *fadt;
if (!phys_addr || !size)
return -EINVAL;
@@ -635,7 +627,7 @@ static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
- fadt = (struct fadt_descriptor_rev2 *)fadt_header;
+ fadt = (struct fadt_descriptor *)fadt_header;
if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
acpi_kbd_controller_present = 0;
@@ -785,9 +777,9 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
/*
* Assuming that the container driver would have set the proximity
- * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
+ * domain and would have initialized pxm_to_node(pxm_id) && pxm_flag
*/
- node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
+ node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_node(pxm_id);
node_cpuid[cpu].phys_id = physid;
#endif
@@ -966,7 +958,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
if (pxm < 0)
return AE_OK;
- node = pxm_to_nid_map[pxm];
+ node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node)))
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 77225659e968..16e7b6600ae6 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -217,16 +217,24 @@ void foo(void)
DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
offsetof (struct ia64_mca_cpu, init_stack));
BLANK();
- DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
- offsetof (struct ia64_sal_os_state, sal_ra));
DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
offsetof (struct ia64_sal_os_state, os_gp));
- DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
- offsetof (struct ia64_sal_os_state, pal_min_state));
DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
offsetof (struct ia64_sal_os_state, proc_state_param));
+ DEFINE(IA64_SAL_OS_STATE_SAL_RA_OFFSET,
+ offsetof (struct ia64_sal_os_state, sal_ra));
+ DEFINE(IA64_SAL_OS_STATE_SAL_GP_OFFSET,
+ offsetof (struct ia64_sal_os_state, sal_gp));
+ DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+ offsetof (struct ia64_sal_os_state, pal_min_state));
+ DEFINE(IA64_SAL_OS_STATE_OS_STATUS_OFFSET,
+ offsetof (struct ia64_sal_os_state, os_status));
+ DEFINE(IA64_SAL_OS_STATE_CONTEXT_OFFSET,
+ offsetof (struct ia64_sal_os_state, context));
DEFINE(IA64_SAL_OS_STATE_SIZE,
sizeof (struct ia64_sal_os_state));
+ BLANK();
+
DEFINE(IA64_PMSA_GR_OFFSET,
offsetof (struct pal_min_state_area_s, pmsa_gr));
DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 12cfedce73b1..c33d0ba7e300 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -8,6 +8,8 @@
* Copyright (C) 1999-2003 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
+ * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* All EFI Runtime Services are not implemented yet as EFI only
* supports physical mode addressing on SoftSDV. This is to be fixed
@@ -622,28 +624,20 @@ efi_get_iobase (void)
return 0;
}
-static efi_memory_desc_t *
-efi_memory_descriptor (unsigned long phys_addr)
+static struct kern_memdesc *
+kern_memory_descriptor (unsigned long phys_addr)
{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
+ struct kern_memdesc *md;
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+ for (md = kern_memmap; md->start != ~0UL; md++) {
+ if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
return 0;
}
-static int
-efi_memmap_has_mmio (void)
+static efi_memory_desc_t *
+efi_memory_descriptor (unsigned long phys_addr)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
@@ -656,8 +650,8 @@ efi_memmap_has_mmio (void)
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
- if (md->type == EFI_MEMORY_MAPPED_IO)
- return 1;
+ if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+ return md;
}
return 0;
}
@@ -683,71 +677,125 @@ efi_mem_attributes (unsigned long phys_addr)
}
EXPORT_SYMBOL(efi_mem_attributes);
-/*
- * Determines whether the memory at phys_addr supports the desired
- * attribute (WB, UC, etc). If this returns 1, the caller can safely
- * access size bytes at phys_addr with the specified attribute.
- */
-int
-efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr)
+u64
+efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
unsigned long end = phys_addr + size;
efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+ u64 attr;
+
+ if (!md)
+ return 0;
+
+ /*
+ * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
+ * the kernel that firmware needs this region mapped.
+ */
+ attr = md->attribute & ~EFI_MEMORY_RUNTIME;
+ do {
+ unsigned long md_end = efi_md_end(md);
+
+ if (end <= md_end)
+ return attr;
+
+ md = efi_memory_descriptor(md_end);
+ if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
+ return 0;
+ } while (md);
+ return 0;
+}
+
+u64
+kern_mem_attribute (unsigned long phys_addr, unsigned long size)
+{
+ unsigned long end = phys_addr + size;
+ struct kern_memdesc *md;
+ u64 attr;
/*
- * Some firmware doesn't report MMIO regions in the EFI memory
- * map. The Intel BigSur (a.k.a. HP i2000) has this problem.
- * On those platforms, we have to assume UC is valid everywhere.
+ * This is a hack for ioremap calls before we set up kern_memmap.
+ * Maybe we should do efi_memmap_init() earlier instead.
*/
- if (!md || (md->attribute & attr) != attr) {
- if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio())
- return 1;
+ if (!kern_memmap) {
+ attr = efi_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return EFI_MEMORY_WB;
return 0;
}
+ md = kern_memory_descriptor(phys_addr);
+ if (!md)
+ return 0;
+
+ attr = md->attribute;
do {
- unsigned long md_end = efi_md_end(md);
+ unsigned long md_end = kmd_end(md);
if (end <= md_end)
- return 1;
+ return attr;
- md = efi_memory_descriptor(md_end);
- if (!md || (md->attribute & attr) != attr)
+ md = kern_memory_descriptor(md_end);
+ if (!md || md->attribute != attr)
return 0;
} while (md);
return 0;
}
+EXPORT_SYMBOL(kern_mem_attribute);
-/*
- * For /dev/mem, we only allow read & write system calls to access
- * write-back memory, because read & write don't allow the user to
- * control access size.
- */
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
- return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
+ u64 attr;
+
+ /*
+ * /dev/mem reads and writes use copy_to_user(), which implicitly
+ * uses a granule-sized kernel identity mapping. It's really
+ * only safe to do this for regions in kern_memmap. For more
+ * details, see Documentation/ia64/aliasing.txt.
+ */
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
+ return 1;
+ return 0;
}
-/*
- * We allow mmap of anything in the EFI memory map that supports
- * either write-back or uncacheable access. For uncacheable regions,
- * the supported access sizes are system-dependent, and the user is
- * responsible for using the correct size.
- *
- * Note that this doesn't currently allow access to hot-added memory,
- * because that doesn't appear in the boot-time EFI memory map.
- */
int
valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
- if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
- return 1;
+ /*
+ * MMIO regions are often missing from the EFI memory map.
+ * We must allow mmap of them for programs like X, so we
+ * currently can't do any useful validation.
+ */
+ return 1;
+}
- if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
- return 1;
+pgprot_t
+phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
+ pgprot_t vma_prot)
+{
+ unsigned long phys_addr = pfn << PAGE_SHIFT;
+ u64 attr;
- return 0;
+ /*
+ * For /dev/mem mmap, we use user mappings, but if the region is
+ * in kern_memmap (and hence may be covered by a kernel mapping),
+ * we must use the same attribute as the kernel mapping.
+ */
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return pgprot_cacheable(vma_prot);
+ else if (attr & EFI_MEMORY_UC)
+ return pgprot_noncached(vma_prot);
+
+ /*
+ * Some chipsets don't support UC access to memory. If
+ * WB is supported, we prefer that.
+ */
+ if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
+ return pgprot_cacheable(vma_prot);
+
+ return pgprot_noncached(vma_prot);
}
int __init
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
index 5a7fe70212a9..a56e161d7515 100644
--- a/arch/ia64/kernel/efi_stub.S
+++ b/arch/ia64/kernel/efi_stub.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(efi_call_phys)
or loc3=loc3,r17
mov b6=r2
;;
- andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
+ andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
br.call.sptk.many rp=ia64_switch_mode_phys
.ret0: mov out4=in5
mov out0=in1
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index bcb80ca5cf40..32c999f58d12 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1584,7 +1584,7 @@ sys_call_table:
data8 sys_keyctl
data8 sys_ioprio_set
data8 sys_ioprio_get // 1275
- data8 sys_ni_syscall
+ data8 sys_move_pages
data8 sys_inotify_init
data8 sys_inotify_add_watch
data8 sys_inotify_rm_watch
diff --git a/arch/ia64/kernel/entry.h b/arch/ia64/kernel/entry.h
index 78eeb0793419..ebc3dfb88826 100644
--- a/arch/ia64/kernel/entry.h
+++ b/arch/ia64/kernel/entry.h
@@ -23,6 +23,7 @@
#define PT(f) (IA64_PT_REGS_##f##_OFFSET)
#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
+#define SOS(f) (IA64_SAL_OS_STATE_##f##_OFFSET)
#define PT_REGS_SAVES(off) \
.unwabi 3, 'i'; \
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 6dff024cd62b..c1bd1feffab0 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -159,7 +159,7 @@ ia64_os_mca_spin:
GET_IA64_MCA_DATA(r2)
// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
;;
- add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2
+ add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
;;
ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK.
;;
@@ -479,9 +479,11 @@ ia64_state_save:
st8 [temp2]=r11,16 // rv_rc
mov r11=cr.iipa
;;
- st8 [temp1]=r18,16 // proc_state_param
- st8 [temp2]=r19,16 // monarch
+ st8 [temp1]=r18 // proc_state_param
+ st8 [temp2]=r19 // monarch
mov r6=IA64_KR(CURRENT)
+ add temp1=SOS(SAL_RA), regs
+ add temp2=SOS(SAL_GP), regs
;;
st8 [temp1]=r12,16 // sal_ra
st8 [temp2]=r10,16 // sal_gp
@@ -503,12 +505,14 @@ ia64_state_save:
st8 [temp2]=r11,16 // cr.iipa
mov r12=cr.iim
;;
- st8 [temp1]=r12,16 // cr.iim
+ st8 [temp1]=r12 // cr.iim
(p1) mov r12=IA64_MCA_COLD_BOOT
(p2) mov r12=IA64_INIT_WARM_BOOT
mov r6=cr.iha
+ add temp1=SOS(OS_STATUS), regs
;;
- st8 [temp2]=r6,16 // cr.iha
+ st8 [temp2]=r6 // cr.iha
+ add temp2=SOS(CONTEXT), regs
st8 [temp1]=r12 // os_status, default is cold boot
mov r6=IA64_MCA_SAME_CONTEXT
;;
@@ -820,8 +824,8 @@ ia64_state_restore:
// Restore the SAL to OS state. The previous code left regs at pt_regs.
add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
;;
- add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs
- add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs
+ add temp1=SOS(SAL_RA), regs
+ add temp2=SOS(SAL_GP), regs
;;
ld8 r12=[temp1],16 // sal_ra
ld8 r9=[temp2],16 // sal_gp
@@ -842,8 +846,10 @@ ia64_state_restore:
;;
mov cr.itir=temp3
mov cr.iipa=temp4
- ld8 temp3=[temp1],16 // cr.iim
- ld8 temp4=[temp2],16 // cr.iha
+ ld8 temp3=[temp1] // cr.iim
+ ld8 temp4=[temp2] // cr.iha
+ add temp1=SOS(OS_STATUS), regs
+ add temp2=SOS(CONTEXT), regs
;;
mov cr.iim=temp3
mov cr.iha=temp4
@@ -916,7 +922,7 @@ ia64_state_restore:
ia64_new_stack:
add regs=MCA_PT_REGS_OFFSET, r3
- add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
+ add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
mov b0=r2 // save return address
GET_IA64_MCA_DATA(temp1)
invala
@@ -1020,7 +1026,7 @@ ia64_old_stack:
ia64_set_kernel_registers:
add temp3=MCA_SP_OFFSET, r3
- add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3
+ add temp4=MCA_SOS_OFFSET+SOS(OS_GP), r3
mov b0=r2 // save return address
GET_IA64_MCA_DATA(temp1)
;;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 077f21216b65..6d7bc8ff7b3a 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -532,7 +532,6 @@ static ctl_table pfm_sysctl_root[] = {
static struct ctl_table_header *pfm_sysctl_header;
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-static int pfm_flush(struct file *filp);
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -595,10 +594,11 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
}
-static struct super_block *
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
+static int
+pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+ return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}
static struct file_system_type pfm_fs_type = {
@@ -1773,7 +1773,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
* When caller is self-monitoring, the context is unloaded.
*/
static int
-pfm_flush(struct file *filp)
+pfm_flush(struct file *filp, fl_owner_t id)
{
pfm_context_t *ctx;
struct task_struct *task;
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 056f7a6eedc7..77fa65903d94 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -227,7 +227,7 @@ static int sal_cache_flush_drops_interrupts;
static void __init
check_sal_cache_flush (void)
{
- unsigned long flags, itv;
+ unsigned long flags;
int cpu;
u64 vector;
@@ -238,9 +238,6 @@ check_sal_cache_flush (void)
* Schedule a timer interrupt, wait until it's reported, and see if
* SAL_CACHE_FLUSH drops it.
*/
- itv = ia64_get_itv();
- BUG_ON((itv & (1 << 16)) == 0);
-
ia64_set_itv(IA64_TIMER_VECTOR);
ia64_set_itm(ia64_get_itc() + 1000);
@@ -260,7 +257,6 @@ check_sal_cache_flush (void)
ia64_eoi();
}
- ia64_set_itv(itv);
local_irq_restore(flags);
put_cpu();
}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e4dfda1eb7dd..6dba2d63f24d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -260,6 +260,7 @@ reserve_memory (void)
n++;
num_rsvd_regions = n;
+ BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
sort_regions(rsvd_region, num_rsvd_regions);
}
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index fcd2bad0286f..5f03b9e524dd 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -29,15 +29,8 @@
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>
-#define DEBUG 0
-#if DEBUG
-#define dprintk printk
-#else
-#define dprintk(x...) do { } while (0)
-#endif
-
-void __init efi_memmap_walk_uc (efi_freemem_callback_t callback);
+extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
#define MAX_UNCACHED_GRANULES 5
static int allocated_granules;
@@ -60,6 +53,7 @@ static void uncached_ipi_visibility(void *data)
static void uncached_ipi_mc_drain(void *data)
{
int status;
+
status = ia64_pal_mc_drain();
if (status)
printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
@@ -67,30 +61,35 @@ static void uncached_ipi_mc_drain(void *data)
}
-static unsigned long
-uncached_get_new_chunk(struct gen_pool *poolp)
+/*
+ * Add a new chunk of uncached memory pages to the specified pool.
+ *
+ * @pool: pool to add new chunk of uncached memory to
+ * @nid: node id of node to allocate memory from, or -1
+ *
+ * This is accomplished by first allocating a granule of cached memory pages
+ * and then converting them to uncached memory pages.
+ */
+static int uncached_add_chunk(struct gen_pool *pool, int nid)
{
struct page *page;
- void *tmp;
int status, i;
- unsigned long addr, node;
+ unsigned long c_addr, uc_addr;
if (allocated_granules >= MAX_UNCACHED_GRANULES)
- return 0;
+ return -1;
+
+ /* attempt to allocate a granule's worth of cached memory pages */
- node = poolp->private;
- page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
+ page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ if (!page)
+ return -1;
- dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
- page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);
+ /* convert the memory pages from cached to uncached */
- /*
- * Do magic if no mem on local node! XXX
- */
- if (!page)
- return 0;
- tmp = page_address(page);
+ c_addr = (unsigned long)page_address(page);
+ uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
/*
* There's a small race here where it's possible for someone to
@@ -100,76 +99,90 @@ uncached_get_new_chunk(struct gen_pool *poolp)
for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
SetPageUncached(&page[i]);
- flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);
+ flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-
- dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
- status, raw_smp_processor_id());
-
if (!status) {
status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
if (status)
- printk(KERN_WARNING "smp_call_function failed for "
- "uncached_ipi_visibility! (%i)\n", status);
+ goto failed;
}
+ preempt_disable();
+
if (ia64_platform_is("sn2"))
- sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
+ sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
else
- flush_icache_range((unsigned long)tmp,
- (unsigned long)tmp+IA64_GRANULE_SIZE);
+ flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
+
+ /* flush the just introduced uncached translation from the TLB */
+ local_flush_tlb_all();
+
+ preempt_enable();
ia64_pal_mc_drain();
status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
if (status)
- printk(KERN_WARNING "smp_call_function failed for "
- "uncached_ipi_mc_drain! (%i)\n", status);
+ goto failed;
- addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
+ /*
+ * The chunk of memory pages has been converted to uncached so now we
+ * can add it to the pool.
+ */
+ status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+ if (status)
+ goto failed;
allocated_granules++;
- return addr;
+ return 0;
+
+ /* failed to convert or add the chunk so give it back to the kernel */
+failed:
+ for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
+ ClearPageUncached(&page[i]);
+
+ free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ return -1;
}
/*
* uncached_alloc_page
*
+ * @starting_nid: node id of node to start with, or -1
+ *
* Allocate 1 uncached page. Allocates on the requested node. If no
* uncached pages are available on the requested node, roundrobin starting
- * with higher nodes.
+ * with the next higher node.
*/
-unsigned long
-uncached_alloc_page(int nid)
+unsigned long uncached_alloc_page(int starting_nid)
{
- unsigned long maddr;
+ unsigned long uc_addr;
+ struct gen_pool *pool;
+ int nid;
- maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);
+ if (unlikely(starting_nid >= MAX_NUMNODES))
+ return 0;
- dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
- maddr, nid);
+ if (starting_nid < 0)
+ starting_nid = numa_node_id();
+ nid = starting_nid;
- /*
- * If no memory is availble on our local node, try the
- * remaining nodes in the system.
- */
- if (!maddr) {
- int i;
-
- for (i = MAX_NUMNODES - 1; i >= 0; i--) {
- if (i == nid || !node_online(i))
- continue;
- maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
- dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
- "returns %lx on node %i\n", maddr, i);
- if (maddr) {
- break;
- }
- }
- }
+ do {
+ if (!node_online(nid))
+ continue;
+ pool = uncached_pool[nid];
+ if (pool == NULL)
+ continue;
+ do {
+ uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+ if (uc_addr != 0)
+ return uc_addr;
+ } while (uncached_add_chunk(pool, nid) == 0);
+
+ } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
- return maddr;
+ return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
@@ -177,21 +190,22 @@ EXPORT_SYMBOL(uncached_alloc_page);
/*
* uncached_free_page
*
+ * @uc_addr: uncached address of page to free
+ *
* Free a single uncached page.
*/
-void
-uncached_free_page(unsigned long maddr)
+void uncached_free_page(unsigned long uc_addr)
{
- int node;
-
- node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET);
+ int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
+ struct gen_pool *pool = uncached_pool[nid];
- dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);
+ if (unlikely(pool == NULL))
+ return;
- if ((maddr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
- panic("uncached_free_page invalid address %lx\n", maddr);
+ if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
+ panic("uncached_free_page invalid address %lx\n", uc_addr);
- gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
+ gen_pool_free(pool, uc_addr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
@@ -199,43 +213,39 @@ EXPORT_SYMBOL(uncached_free_page);
/*
* uncached_build_memmap,
*
+ * @uc_start: uncached starting address of a chunk of uncached memory
+ * @uc_end: uncached ending address of a chunk of uncached memory
+ * @arg: ignored (NULL argument passed in on call to efi_memmap_walk_uc())
+ *
* Called at boot time to build a map of pages that can be used for
* memory special operations.
*/
-static int __init
-uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
+static int __init uncached_build_memmap(unsigned long uc_start,
+ unsigned long uc_end, void *arg)
{
- long length = end - start;
- int node;
-
- dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+ int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
+ struct gen_pool *pool = uncached_pool[nid];
+ size_t size = uc_end - uc_start;
touch_softlockup_watchdog();
- memset((char *)start, 0, length);
- node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
-
- for (; start < end ; start += PAGE_SIZE) {
- dprintk(KERN_INFO "sticking %lx into the pool!\n", start);
- gen_pool_free(uncached_pool[node], start, PAGE_SIZE);
+ if (pool != NULL) {
+ memset((char *)uc_start, 0, size);
+ (void) gen_pool_add(pool, uc_start, size, nid);
}
-
return 0;
}
-static int __init uncached_init(void) {
- int i;
+static int __init uncached_init(void)
+{
+ int nid;
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (!node_online(i))
- continue;
- uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
- &uncached_get_new_chunk, i);
+ for_each_online_node(nid) {
+ uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
}
- efi_memmap_walk_uc(uncached_build_memmap);
-
+ efi_memmap_walk_uc(uncached_build_memmap, NULL);
return 0;
}
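
The rework above keeps one gen_pool per node, converts whole granules to uncached memory on demand, and round-robins to other nodes only when the local pool cannot be refilled. A minimal caller sketch, assuming the uncached_alloc_page()/uncached_free_page() prototypes as changed above; the function and variable names are placeholders, not part of the patch:

	/* Hypothetical caller: take one uncached page, preferring the local
	 * node (-1 means start at numa_node_id()), then give it back. */
	static int example_use_uncached(void)
	{
		unsigned long uc_addr;

		uc_addr = uncached_alloc_page(-1);
		if (uc_addr == 0)
			return -ENOMEM;	/* no uncached memory on any online node */

		/* ... use the memory through the uncached mapping ... */

		uncached_free_page(uc_addr);
		return 0;
	}
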
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index cafa8776a53d..11f08001f8c2 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -671,9 +671,11 @@ int add_memory(u64 start, u64 size)
return ret;
}
+EXPORT_SYMBOL_GPL(add_memory);
int remove_memory(u64 start, u64 size)
{
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(remove_memory);
#endif
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 643ccc6960ce..07bd02b6c372 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/efi.h>
#include <asm/io.h>
+#include <asm/meminit.h>
static inline void __iomem *
__ioremap (unsigned long offset, unsigned long size)
@@ -21,16 +22,29 @@ __ioremap (unsigned long offset, unsigned long size)
void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
- if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
- return phys_to_virt(offset);
+ u64 attr;
+ unsigned long gran_base, gran_size;
- if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+ /*
+ * For things in kern_memmap, we must use the same attribute
+ * as the rest of the kernel. For more details, see
+ * Documentation/ia64/aliasing.txt.
+ */
+ attr = kern_mem_attribute(offset, size);
+ if (attr & EFI_MEMORY_WB)
+ return phys_to_virt(offset);
+ else if (attr & EFI_MEMORY_UC)
return __ioremap(offset, size);
/*
- * Someday this should check ACPI resources so we
- * can do the right thing for hot-plugged regions.
+ * Some chipsets don't support UC access to memory. If
+ * WB is supported for the whole granule, we prefer that.
*/
+ gran_base = GRANULEROUNDDOWN(offset);
+ gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+ if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
+ return phys_to_virt(offset);
+
return __ioremap(offset, size);
}
EXPORT_SYMBOL(ioremap);
@@ -38,6 +52,9 @@ EXPORT_SYMBOL(ioremap);
void __iomem *
ioremap_nocache (unsigned long offset, unsigned long size)
{
+ if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+ return 0;
+
return __ioremap(offset, size);
}
EXPORT_SYMBOL(ioremap_nocache);
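
With the hunks above, ioremap() follows the attribute the kernel already uses for the range (WB when possible, UC otherwise), and ioremap_nocache() now returns 0 rather than creating an uncached alias of memory that kern_memmap maps write-back. A hedged driver-side sketch of the resulting behaviour; bar_phys and bar_len are placeholders:

	/* Hypothetical fragment: map device registers uncached. */
	void __iomem *regs;

	regs = ioremap_nocache(bar_phys, bar_len);
	if (!regs)
		return -EBUSY;	/* range is WB in kern_memmap: UC alias refused */

	writel(0, regs);	/* access through the uncached mapping */
	iounmap(regs);
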
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index ab829a22f8a4..61dd8608da4f 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -352,7 +352,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
if (pxm >= 0)
- controller->node = pxm_to_nid_map[pxm];
+ controller->node = pxm_to_node(pxm);
#endif
acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
@@ -645,18 +645,31 @@ char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ pgprot_t prot;
char *addr;
+ /*
+ * Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
+ * for more details.
+ */
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
+ return -EINVAL;
+ prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
+ vma->vm_page_prot);
+ if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
+ return -EINVAL;
+
addr = pci_get_legacy_mem(bus);
if (IS_ERR(addr))
return PTR_ERR(addr);
vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_page_prot = prot;
vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ size, vma->vm_page_prot))
return -EAGAIN;
return 0;
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 30988dfbddff..93577abae36d 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -139,7 +139,7 @@ static int __init pxm_to_nasid(int pxm)
int i;
int nid;
- nid = pxm_to_nid_map[pxm];
+ nid = pxm_to_node(pxm);
for (i = 0; i < num_node_memblks; i++) {
if (node_memblk[i].nid == nid) {
return NASID_GET(node_memblk[i].start_paddr);
@@ -704,7 +704,7 @@ void __init build_cnode_tables(void)
* cnode == node for all C & M bricks.
*/
for_each_online_node(node) {
- nasid = pxm_to_nasid(nid_to_pxm_map[node]);
+ nasid = pxm_to_nasid(node_to_pxm(node));
sn_cnodeid_to_nasid[node] = nasid;
physical_node_map[nasid] = node;
}
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
index bc3cfa17cd0f..2862cb33026d 100644
--- a/arch/ia64/sn/kernel/sn2/cache.c
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -3,11 +3,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*
*/
#include <linux/module.h>
#include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
/**
* sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -17,18 +18,24 @@
* Flush a range of addresses from all caches including L4.
* All addresses fully or partially contained within
* @flush_addr to @flush_addr + @bytes are flushed
- * from the all caches.
+ * from all caches.
*/
void
sn_flush_all_caches(long flush_addr, long bytes)
{
- flush_icache_range(flush_addr, flush_addr+bytes);
+ unsigned long addr = flush_addr;
+
+ /* SHub1 requires a cached address */
+ if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+ addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+ flush_icache_range(addr, addr + bytes);
/*
* The last call may have returned before the caches
* were actually flushed, so we call it again to make
* sure.
*/
- flush_icache_range(flush_addr, flush_addr+bytes);
+ flush_icache_range(addr, addr + bytes);
mb();
}
EXPORT_SYMBOL(sn_flush_all_caches);
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 739c948dc504..9a8a29339d2d 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -51,6 +51,8 @@ static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
static DECLARE_MUTEX(sn_hwperf_init_mutex);
+#define cnode_possible(n) ((n) < num_cnodes)
+
static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
{
int e;
@@ -127,14 +129,14 @@ static int sn_hwperf_geoid_to_cnode(char *location)
}
}
- return node_possible(cnode) ? cnode : -1;
+ return cnode_possible(cnode) ? cnode : -1;
}
static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
{
if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
BUG();
- if (!obj->sn_hwp_this_part)
+ if (SN_HWPERF_FOREIGN(obj))
return -1;
return sn_hwperf_geoid_to_cnode(obj->location);
}
@@ -199,12 +201,12 @@ static void print_pci_topology(struct seq_file *s)
static inline int sn_hwperf_has_cpus(cnodeid_t node)
{
- return node_online(node) && nr_cpus_node(node);
+ return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node);
}
static inline int sn_hwperf_has_mem(cnodeid_t node)
{
- return node_online(node) && NODE_DATA(node)->node_present_pages;
+ return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages;
}
static struct sn_hwperf_object_info *
@@ -237,7 +239,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
int found_mem = 0;
int found_cpu = 0;
- if (!node_possible(node))
+ if (!cnode_possible(node))
return -EINVAL;
if (sn_hwperf_has_cpus(node)) {
@@ -442,7 +444,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
obj->sn_hwp_this_part ? "local" : "shared", obj->name);
- if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+ if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)))
seq_putc(s, '\n');
else {
cnodeid_t near_mem = -1;
@@ -468,22 +470,24 @@ static int sn_topology_show(struct seq_file *s, void *d)
/*
* CPUs on this node, if any
*/
- cpumask = node_to_cpumask(ordinal);
- for_each_online_cpu(i) {
- if (cpu_isset(i, cpumask)) {
- slice = 'a' + cpuid_to_slice(i);
- c = cpu_data(i);
- seq_printf(s, "cpu %d %s%c local"
- " freq %luMHz, arch ia64",
- i, obj->location, slice,
- c->proc_freq / 1000000);
- for_each_online_cpu(j) {
- seq_printf(s, j ? ":%d" : ", dist %d",
- node_distance(
- cpu_to_node(i),
- cpu_to_node(j)));
+ if (!SN_HWPERF_IS_IONODE(obj)) {
+ cpumask = node_to_cpumask(ordinal);
+ for_each_online_cpu(i) {
+ if (cpu_isset(i, cpumask)) {
+ slice = 'a' + cpuid_to_slice(i);
+ c = cpu_data(i);
+ seq_printf(s, "cpu %d %s%c local"
+ " freq %luMHz, arch ia64",
+ i, obj->location, slice,
+ c->proc_freq / 1000000);
+ for_each_online_cpu(j) {
+ seq_printf(s, j ? ":%d" : ", dist %d",
+ node_distance(
+ cpu_to_node(i),
+ cpu_to_node(j)));
+ }
+ seq_putc(s, '\n');
}
- seq_putc(s, '\n');
}
}
}
@@ -523,7 +527,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
/* both ends local to this partition */
seq_puts(s, " local");
- else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
+ else if (SN_HWPERF_FOREIGN(p))
/* both ends of the link in foreign partition */
seq_puts(s, " foreign");
else
@@ -776,7 +780,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
case SN_HWPERF_GET_NODE_NASID:
if (a.sz != sizeof(u64) ||
- (node = a.arg) < 0 || !node_possible(node)) {
+ (node = a.arg) < 0 || !cnode_possible(node)) {
r = -EINVAL;
goto error;
}
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 4cac7bdc7c7f..2d7948567ebc 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
@@ -1023,7 +1023,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
~0ULL);
- tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, ~0ULL);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL);
if (request_irq(SGI_PCIASIC_ERROR,
tioce_error_intr_handler,
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 320fde05dc63..522079f8c2ba 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -226,7 +226,7 @@ ENTRY(nmi_handler)
inthandler:
SAVE_ALL_INT
GET_CURRENT(%d0)
- addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+ addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_VECTOR){#4,#10},%d0
@@ -245,7 +245,7 @@ inthandler:
3: addql #8,%sp | pop parameters off stack
ret_from_interrupt:
- subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+ subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq 1f
2:
RESTORE_ALL
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 514d323ad536..4b85514792e7 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -95,6 +95,12 @@ void __init init_IRQ(void)
{
int i;
+ /* assembly irq entry code relies on this... */
+ if (HARDIRQ_MASK != 0x00ff0000) {
+ extern void hardirq_mask_is_broken(void);
+ hardirq_mask_is_broken();
+ }
+
for (i = 0; i < SYS_IRQS; i++) {
if (mach_default_handler)
irq_list[i].handler = (*mach_default_handler)[i];
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 866917bfa028..f9af893cd289 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -763,7 +763,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(usp))
+ if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
return (void __user *)((usp - frame_size) & -8UL);
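
The switch from on_sig_stack() to sas_ss_flags() here (and in the m68knommu and parisc hunks below) closes a hole: with SA_ONSTACK set but no sigaltstack() ever issued, on_sig_stack() is false and the old code would move the signal frame to sas_ss_sp + sas_ss_size == 0. sas_ss_flags() reports SS_DISABLE in that case, so the frame stays on the normal stack. For reference, the generic helper behaves roughly like this sketch (simplified, not part of this patch):

	static inline int sas_ss_flags(unsigned long sp)
	{
		return (current->sas_ss_size == 0 ? SS_DISABLE
			: on_sig_stack(sp) ? SS_ONSTACK : 0);
	}
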
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index cdf58fbb3e73..837a88709902 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -114,7 +114,7 @@ void __init base_trap_init(void)
if(MACH_IS_SUN3X) {
extern e_vector *sun3x_prom_vbr;
- __asm__ volatile ("movec %%vbr, %0" : "=r" ((void*)sun3x_prom_vbr));
+ __asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
}
/* setup the exception vector table */
@@ -992,6 +992,7 @@ void show_registers(struct pt_regs *regs)
void show_stack(struct task_struct *task, unsigned long *stack)
{
+ unsigned long *p;
unsigned long *endstack;
int i;
@@ -1004,12 +1005,13 @@ void show_stack(struct task_struct *task, unsigned long *stack)
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
printk("Stack from %08lx:", (unsigned long)stack);
+ p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
- if (stack + 1 > endstack)
+ if (p + 1 > endstack)
break;
if (i % 8 == 0)
printk("\n ");
- printk(" %08lx", *stack++);
+ printk(" %08lx", *p++);
}
printk("\n");
show_trace(stack);
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index ebe51a513817..6bbf19f96007 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -4,5 +4,5 @@
EXTRA_AFLAGS := -traditional
-lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
- checksum.o string.o semaphore.o
+lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+ checksum.o string.o semaphore.o uaccess.o
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
new file mode 100644
index 000000000000..1bc188c0d983
--- /dev/null
+++ b/arch/m68k/lib/uaccess.c
@@ -0,0 +1,222 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+unsigned long __generic_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ unsigned long tmp, res;
+
+ asm volatile ("\n"
+ " tst.l %0\n"
+ " jeq 2f\n"
+ "1: moves.l (%1)+,%3\n"
+ " move.l %3,(%2)+\n"
+ " subq.l #1,%0\n"
+ " jne 1b\n"
+ "2: btst #1,%5\n"
+ " jeq 4f\n"
+ "3: moves.w (%1)+,%3\n"
+ " move.w %3,(%2)+\n"
+ "4: btst #0,%5\n"
+ " jeq 6f\n"
+ "5: moves.b (%1)+,%3\n"
+ " move.b %3,(%2)+\n"
+ "6:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "10: move.l %0,%3\n"
+ "7: clr.l (%2)+\n"
+ " subq.l #1,%3\n"
+ " jne 7b\n"
+ " lsl.l #2,%0\n"
+ " btst #1,%5\n"
+ " jeq 8f\n"
+ "30: clr.w (%2)+\n"
+ " addq.l #2,%0\n"
+ "8: btst #0,%5\n"
+ " jeq 6b\n"
+ "50: clr.b (%2)+\n"
+ " addq.l #1,%0\n"
+ " jra 6b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,10b\n"
+ " .long 3b,30b\n"
+ " .long 5b,50b\n"
+ " .previous"
+ : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp)
+ : "0" (n / 4), "d" (n & 3));
+
+ return res;
+}
+EXPORT_SYMBOL(__generic_copy_from_user);
+
+unsigned long __generic_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ unsigned long tmp, res;
+
+ asm volatile ("\n"
+ " tst.l %0\n"
+ " jeq 4f\n"
+ "1: move.l (%1)+,%3\n"
+ "2: moves.l %3,(%2)+\n"
+ "3: subq.l #1,%0\n"
+ " jne 1b\n"
+ "4: btst #1,%5\n"
+ " jeq 6f\n"
+ " move.w (%1)+,%3\n"
+ "5: moves.w %3,(%2)+\n"
+ "6: btst #0,%5\n"
+ " jeq 8f\n"
+ " move.b (%1)+,%3\n"
+ "7: moves.b %3,(%2)+\n"
+ "8:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "20: lsl.l #2,%0\n"
+ "50: add.l %5,%0\n"
+ " jra 7b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,20b\n"
+ " .long 3b,20b\n"
+ " .long 5b,50b\n"
+ " .long 6b,50b\n"
+ " .long 7b,50b\n"
+ " .long 8b,50b\n"
+ " .previous"
+ : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp)
+ : "0" (n / 4), "d" (n & 3));
+
+ return res;
+}
+EXPORT_SYMBOL(__generic_copy_to_user);
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+long strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res;
+ char c;
+
+ if (count <= 0)
+ return count;
+
+ asm volatile ("\n"
+ "1: moves.b (%2)+,%4\n"
+ " move.b %4,(%1)+\n"
+ " jeq 2f\n"
+ " subq.l #1,%3\n"
+ " jne 1b\n"
+ "2: sub.l %3,%0\n"
+ "3:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "10: move.l %5,%0\n"
+ " jra 3b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,10b\n"
+ " .previous"
+ : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
+ : "i" (-EFAULT), "0" (count));
+
+ return res;
+}
+EXPORT_SYMBOL(strncpy_from_user);
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+long strnlen_user(const char __user *src, long n)
+{
+ char c;
+ long res;
+
+ asm volatile ("\n"
+ "1: subq.l #1,%1\n"
+ " jmi 3f\n"
+ "2: moves.b (%0)+,%2\n"
+ " tst.b %2\n"
+ " jne 1b\n"
+ " jra 4f\n"
+ "\n"
+ "3: addq.l #1,%0\n"
+ "4: sub.l %4,%0\n"
+ "5:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "20: sub.l %0,%0\n"
+ " jra 5b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,20b\n"
+ " .previous\n"
+ : "=&a" (res), "+d" (n), "=&d" (c)
+ : "0" (src), "r" (src));
+
+ return res;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+/*
+ * Zero Userspace
+ */
+
+unsigned long clear_user(void __user *to, unsigned long n)
+{
+ unsigned long res;
+
+ asm volatile ("\n"
+ " tst.l %0\n"
+ " jeq 3f\n"
+ "1: moves.l %2,(%1)+\n"
+ "2: subq.l #1,%0\n"
+ " jne 1b\n"
+ "3: btst #1,%4\n"
+ " jeq 5f\n"
+ "4: moves.w %2,(%1)+\n"
+ "5: btst #0,%4\n"
+ " jeq 7f\n"
+ "6: moves.b %2,(%1)\n"
+ "7:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "10: lsl.l #2,%0\n"
+ "40: add.l %4,%0\n"
+ " jra 7b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,10b\n"
+ " .long 2b,10b\n"
+ " .long 4b,40b\n"
+ " .long 5b,40b\n"
+ " .long 6b,40b\n"
+ " .long 7b,40b\n"
+ " .previous"
+ : "=d" (res), "+a" (to)
+ : "r" (0), "0" (n / 4), "d" (n & 3));
+
+ return res;
+}
+EXPORT_SYMBOL(clear_user);
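
The return conventions in the comments above differ from the usual errno style: the copy routines return the number of bytes left uncopied, and strnlen_user() returns 0 on a fault or a value larger than the limit when no terminator was found. A hedged caller sketch; ustr, kbuf and the chosen limit are placeholders:

	/* Hypothetical use of the helpers added above. */
	long len;

	len = strnlen_user(ustr, PATH_MAX);
	if (len == 0)
		return -EFAULT;			/* faulted while scanning */
	if (len > PATH_MAX)
		return -ENAMETOOLONG;		/* not terminated within the limit */
	if (copy_from_user(kbuf, ustr, len))	/* returns bytes NOT copied */
		return -EFAULT;
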
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 14f8d3f4e195..19dce75711b1 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -89,24 +89,11 @@ extern void mac_debugging_long(int, long);
static void mac_get_model(char *str);
-void mac_bang(int irq, void *vector, struct pt_regs *p)
-{
- printk(KERN_INFO "Resetting ...\n");
- mac_reset();
-}
-
static void mac_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *))
{
via_init_clock(vector);
}
-#if 0
-void mac_waitbut (void)
-{
- ;
-}
-#endif
-
extern irqreturn_t mac_default_handler(int, void *, struct pt_regs *);
irqreturn_t (*mac_handlers[8])(int, void *, struct pt_regs *)=
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 1809601ad903..7a1600bd195d 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -216,7 +216,6 @@ static void scc_irq_disable(int);
* console_loglevel determines NMI handler function
*/
-extern irqreturn_t mac_bang(int, void *, struct pt_regs *);
irqreturn_t mac_nmi_handler(int, void *, struct pt_regs *);
irqreturn_t mac_debug_handler(int, void *, struct pt_regs *);
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index cd528bf7b43f..a6e3814c8666 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/ide.h>
-#include <asm/traps.h>
#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
@@ -71,7 +70,6 @@ void via_irq_enable(int irq);
void via_irq_disable(int irq);
void via_irq_clear(int irq);
-extern irqreturn_t mac_bang(int, void *, struct pt_regs *);
extern irqreturn_t mac_scc_dispatch(int, void *, struct pt_regs *);
extern int oss_present;
@@ -212,11 +210,6 @@ void __init via_init(void)
break;
}
#else
- /* The alernate IRQ mapping seems to just not work. Anyone with a */
- /* supported machine is welcome to take a stab at fixing it. It */
- /* _should_ work on the following Quadras: 610,650,700,800,900,950 */
- /* - 1999-06-12 (jmt) */
-
via_alt_mapping = 0;
#endif
@@ -270,12 +263,6 @@ void __init via_register_interrupts(void)
cpu_request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
(void *) via1);
-#if 0 /* interferes with serial on some machines */
- if (!psc_present) {
- cpu_request_irq(IRQ_AUTO_6, mac_bang, IRQ_FLG_LOCK,
- "Off Switch", mac_bang);
- }
-#endif
}
cpu_request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
"via2", (void *) via2);
@@ -471,8 +458,8 @@ irqreturn_t via2_irq(int irq, void *dev_id, struct pt_regs *regs)
for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1)
if (events & irq_bit) {
via2[gIER] = irq_bit;
- mac_do_irq_list(VIA2_SOURCE_BASE + i, regs);
via2[gIFR] = irq_bit | rbv_clear;
+ mac_do_irq_list(VIA2_SOURCE_BASE + i, regs);
via2[gIER] = irq_bit | 0x80;
}
return IRQ_HANDLED;
@@ -529,6 +516,7 @@ void via_irq_enable(int irq) {
}
via2[gIER] = irq_bit | 0x80;
} else if (irq_src == 7) {
+ nubus_active |= irq_bit;
if (rbv_present) {
/* enable the slot interrupt. SIER works like IER. */
via2[rSIER] = IER_SET_BIT(irq_idx);
@@ -550,7 +538,6 @@ void via_irq_enable(int irq) {
}
}
}
- nubus_active |= irq_bit;
}
}
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index afb57eeafdcb..bdb11103694b 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -203,7 +203,7 @@ void __init paging_init(void)
{
int chunk;
unsigned long mem_avail = 0;
- unsigned long zones_size[3] = { 0, };
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
#ifdef DEBUG
{
@@ -257,12 +257,12 @@ void __init paging_init(void)
#ifdef DEBUG
printk ("before free_area_init\n");
#endif
- zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
- (mach_max_dma_address+1) : (unsigned long)high_memory);
- zones_size[1] = (unsigned long)high_memory - zones_size[0];
+ zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ?
+ (mach_max_dma_address+1) : (unsigned long)high_memory);
+ zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[0];
- zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[1] >>= PAGE_SHIFT;
+ zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT;
+ zones_size[ZONE_NORMAL] >>= PAGE_SHIFT;
free_area_init(zones_size);
}
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index a47be196a47c..ac6640ade0b1 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -46,7 +46,7 @@ void __init paging_init(void)
unsigned long address;
unsigned long next_pgtable;
unsigned long bootmem_end;
- unsigned long zones_size[3] = {0, 0, 0};
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long size;
@@ -92,8 +92,7 @@ void __init paging_init(void)
current->mm = NULL;
/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
- zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[1] = 0;
+ zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
free_area_init(zones_size);
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index e1b3aa39e270..8e2c5a88efa7 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -553,7 +553,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(usp))
+ if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
return (void *)((usp - frame_size) & -8UL);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a0ac0e5f61ad..2d2fdf77e308 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -497,7 +497,7 @@ einval: li v0, -EINVAL
sys sys_sched_get_priority_min 1
sys sys_sched_rr_get_interval 2 /* 4165 */
sys sys_nanosleep, 2
- sys sys_mremap, 4
+ sys sys_mremap, 5
sys sys_accept 3
sys sys_bind 3
sys sys_connect 3 /* 4170 */
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 5407b784cd01..19e1ef43eb4b 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -694,7 +694,7 @@ asmlinkage int irix_statfs(const char __user *path,
if (error)
goto out;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(nd.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -732,7 +732,7 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs __user *buf)
goto out;
}
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(file->f_dentry, &kbuf);
if (error)
goto out_f;
@@ -1360,7 +1360,7 @@ asmlinkage int irix_statvfs(char __user *fname, struct irix_statvfs __user *buf)
error = user_path_walk(fname, &nd);
if (error)
goto out;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(nd.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -1406,7 +1406,7 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs __user *buf)
error = -EBADF;
goto out;
}
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(file->f_dentry, &kbuf);
if (error)
goto out_f;
@@ -1611,7 +1611,7 @@ asmlinkage int irix_statvfs64(char __user *fname, struct irix_statvfs64 __user *
error = user_path_walk(fname, &nd);
if (error)
goto out;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(nd.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -1658,7 +1658,7 @@ asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs __user *buf)
error = -EBADF;
goto out;
}
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(file->f_dentry, &kbuf);
if (error)
goto out_f;
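
These sysirix conversions, and the parisc/hpux ones just below, track the VFS change that makes vfs_statfs() take a dentry rather than a super_block, so a filesystem's ->statfs() can report per-subtree numbers if it wants to. A hedged sketch of the new calling convention; nd, path and st are placeholders:

	/* Path-walk caller, new style. */
	struct kstatfs st;
	int err;

	err = user_path_walk(path, &nd);
	if (err)
		return err;
	err = vfs_statfs(nd.dentry, &st);	/* was: nd.dentry->d_inode->i_sb */
	path_release(&nd);
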
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 05273ccced0e..cb69727027ae 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -145,7 +145,7 @@ static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf)
s = user_get_super(dev);
if (s == NULL)
goto out;
- err = vfs_statfs(s, &sbuf);
+ err = vfs_statfs(s->s_root, &sbuf);
drop_super(s);
if (err)
goto out;
@@ -186,12 +186,12 @@ struct hpux_statfs {
int16_t f_pad;
};
-static int vfs_statfs_hpux(struct super_block *sb, struct hpux_statfs *buf)
+static int vfs_statfs_hpux(struct dentry *dentry, struct hpux_statfs *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(sb, &st);
+ retval = vfs_statfs(dentry, &st);
if (retval)
return retval;
@@ -219,7 +219,7 @@ asmlinkage long hpux_statfs(const char __user *path,
error = user_path_walk(path, &nd);
if (!error) {
struct hpux_statfs tmp;
- error = vfs_statfs_hpux(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_hpux(nd.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_release(&nd);
@@ -237,7 +237,7 @@ asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs_hpux(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_hpux(file->f_dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 05767e83cf2d..cc38edfd90c5 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -248,7 +248,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
(unsigned long)ka, sp, frame_size);
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp; /* Stacks grow up! */
DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6729c98b66f9..e922a88b2bad 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -45,6 +45,10 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
config PPC
bool
default y
@@ -137,6 +141,15 @@ config PPC_85xx
select FSL_SOC
select 85xx
+config PPC_86xx
+ bool "Freescale 86xx"
+ select 6xx
+ select FSL_SOC
+ select PPC_FPU
+ select ALTIVEC
+ help
+ The Freescale E600 SoCs have 74xx cores.
+
config 40x
bool "AMCC 40x"
@@ -336,7 +349,7 @@ endchoice
config PPC_PSERIES
depends on PPC_MULTIPLATFORM && PPC64
- bool " IBM pSeries & new (POWER5-based) iSeries"
+ bool "IBM pSeries & new (POWER5-based) iSeries"
select PPC_I8259
select PPC_RTAS
select RTAS_ERROR_LOGGING
@@ -344,7 +357,7 @@ config PPC_PSERIES
default y
config PPC_CHRP
- bool " Common Hardware Reference Platform (CHRP) based machines"
+ bool "Common Hardware Reference Platform (CHRP) based machines"
depends on PPC_MULTIPLATFORM && PPC32
select PPC_I8259
select PPC_INDIRECT_PCI
@@ -354,7 +367,7 @@ config PPC_CHRP
default y
config PPC_PMAC
- bool " Apple PowerMac based machines"
+ bool "Apple PowerMac based machines"
depends on PPC_MULTIPLATFORM
select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32
@@ -370,7 +383,7 @@ config PPC_PMAC64
default y
config PPC_PREP
- bool " PowerPC Reference Platform (PReP) based machines"
+ bool "PowerPC Reference Platform (PReP) based machines"
depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
select PPC_I8259
select PPC_INDIRECT_PCI
@@ -379,7 +392,7 @@ config PPC_PREP
config PPC_MAPLE
depends on PPC_MULTIPLATFORM && PPC64
- bool " Maple 970FX Evaluation Board"
+ bool "Maple 970FX Evaluation Board"
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
@@ -391,8 +404,18 @@ config PPC_MAPLE
For more information, refer to <http://www.970eval.com>
config PPC_CELL
- bool " Cell Broadband Processor Architecture"
+ bool
+ default n
+
+config PPC_CELL_NATIVE
+ bool
+ select PPC_CELL
+ default n
+
+config PPC_IBM_CELL_BLADE
+ bool " IBM Cell Blade"
depends on PPC_MULTIPLATFORM && PPC64
+ select PPC_CELL_NATIVE
select PPC_RTAS
select MMIO_NVRAM
select PPC_UDBG_16550
@@ -439,11 +462,6 @@ config MPIC_BROKEN_U3
depends on PPC_MAPLE
default y
-config CELL_IIC
- depends on PPC_CELL
- bool
- default y
-
config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES
bool
@@ -545,6 +563,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig
source arch/powerpc/platforms/4xx/Kconfig
source arch/powerpc/platforms/83xx/Kconfig
source arch/powerpc/platforms/85xx/Kconfig
+source arch/powerpc/platforms/86xx/Kconfig
source arch/powerpc/platforms/8xx/Kconfig
source arch/powerpc/platforms/cell/Kconfig
@@ -699,7 +718,6 @@ config PPC_64K_PAGES
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on PPC64 && SMP
- default off
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with POWER5 cpus at a cost of slightly increased
@@ -776,6 +794,7 @@ config GENERIC_ISA_DMA
config PPC_I8259
bool
+ default y if MPC8641_HPCN
default n
config PPC_INDIRECT_PCI
@@ -798,8 +817,8 @@ config MCA
bool
config PCI
- bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
- default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx
+ bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
+ default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx && !PPC_86xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
default PCI_QSPAN if !4xx && !CPM2 && 8xx
help
@@ -827,12 +846,12 @@ config PCI_8260
default y
config 8260_PCI9
- bool " Enable workaround for MPC826x erratum PCI 9"
+ bool "Enable workaround for MPC826x erratum PCI 9"
depends on PCI_8260 && !ADS8272
default y
choice
- prompt " IDMA channel for PCI 9 workaround"
+ prompt "IDMA channel for PCI 9 workaround"
depends on 8260_PCI9
config 8260_PCI9_IDMA1
@@ -849,6 +868,8 @@ config 8260_PCI9_IDMA4
endchoice
+source "drivers/pci/pcie/Kconfig"
+
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 8d48e9e7162a..c69006ae8246 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -110,13 +110,16 @@ config SERIAL_TEXT_DEBUG
depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
PPC_GEN550 || PPC_MPC52xx
+config PPC_EARLY_DEBUG
+ bool "Early debugging (dangerous)"
+
choice
- prompt "Early debugging (dangerous)"
- bool
- optional
+ prompt "Early debugging console"
+ depends on PPC_EARLY_DEBUG
help
- Enable early debugging. Careful, if you enable debugging for the
- wrong type of machine your kernel _will not boot_.
+ Use the selected console for early debugging. Careful, if you
+ enable debugging for the wrong type of machine your kernel
+ _will not boot_.
config PPC_EARLY_DEBUG_LPAR
bool "LPAR HV Console"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index ed5b26aa8be3..01667d1d571d 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -108,7 +108,6 @@ ifeq ($(CONFIG_6xx),y)
CFLAGS += -mcpu=powerpc
endif
-cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 816446f0e497..b66634c9ea34 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -33,6 +33,14 @@ extern char _vmlinux_end[];
extern char _initrd_start[];
extern char _initrd_end[];
+/* A buffer that may be edited by tools operating on a zImage binary to
+ * change the command line passed to vmlinux (by setting /chosen/bootargs).
+ * The buffer is placed in its own section so that tools can locate it more
+ * easily.
+ */
+static char builtin_cmdline[512]
+ __attribute__((section("__builtin_cmdline")));
+
+
struct addr_range {
unsigned long addr;
unsigned long size;
@@ -204,6 +212,23 @@ static int is_elf32(void *hdr)
return 1;
}
+void export_cmdline(void* chosen_handle)
+{
+ int len;
+ char cmdline[2] = { 0, 0 };
+
+ if (builtin_cmdline[0] == 0)
+ return;
+
+ len = getprop(chosen_handle, "bootargs", cmdline, sizeof(cmdline));
+ if (len > 0 && cmdline[0] != 0)
+ return;
+
+ setprop(chosen_handle, "bootargs", builtin_cmdline,
+ strlen(builtin_cmdline) + 1);
+}
+
+
void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{
int len;
@@ -289,6 +314,8 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
}
+ export_cmdline(chosen_handle);
+
/* Skip over the ELF header */
#ifdef DEBUG
printf("... skipping 0x%lx bytes of ELF header\n\r",
diff --git a/arch/powerpc/boot/prom.h b/arch/powerpc/boot/prom.h
index 3e2ddd4a5a81..a57b184c564f 100644
--- a/arch/powerpc/boot/prom.h
+++ b/arch/powerpc/boot/prom.h
@@ -31,4 +31,11 @@ static inline int getprop(void *phandle, const char *name,
return call_prom("getprop", 4, 1, phandle, name, buf, buflen);
}
+
+static inline int setprop(void *phandle, const char *name,
+ void *buf, int buflen)
+{
+ return call_prom("setprop", 4, 1, phandle, name, buf, buflen);
+}
+
#endif /* _PPC_BOOT_PROM_H_ */
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index dbe421dc3c11..b8b8d4675dc0 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16
-# Thu Mar 23 20:48:09 2006
+# Linux kernel version: 2.6.17
+# Mon Jun 19 17:23:03 2006
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
@@ -55,7 +56,7 @@ CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-# CONFIG_CPUSETS is not set
+CONFIG_CPUSETS=y
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -116,13 +117,15 @@ CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_MAPLE is not set
CONFIG_PPC_CELL=y
+CONFIG_PPC_CELL_NATIVE=y
+CONFIG_PPC_IBM_CELL_BLADE=y
+CONFIG_PPC_SYSTEMSIM=y
# CONFIG_U3_DART is not set
CONFIG_PPC_RTAS=y
# CONFIG_RTAS_ERROR_LOGGING is not set
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=y
CONFIG_MMIO_NVRAM=y
-CONFIG_CELL_IIC=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
# CONFIG_CPU_FREQ is not set
@@ -132,7 +135,9 @@ CONFIG_CELL_IIC=y
# Cell Broadband Engine options
#
CONFIG_SPU_FS=m
+CONFIG_SPU_BASE=y
CONFIG_SPUFS_MMAP=y
+CONFIG_CBE_RAS=y
#
# Kernel options
@@ -152,20 +157,24 @@ CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set
CONFIG_IRQ_ALL_CPUS=y
-# CONFIG_NUMA is not set
+CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=4
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
+CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
-# CONFIG_MEMORY_HOTPLUG is not set
+CONFIG_MEMORY_HOTPLUG=y
CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
+CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
+CONFIG_ARCH_MEMORY_PROBE=y
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y
@@ -182,6 +191,7 @@ CONFIG_GENERIC_ISA_DMA=y
# CONFIG_PPC_INDIRECT_PCI is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
# CONFIG_PCI_DEBUG is not set
#
@@ -476,7 +486,7 @@ CONFIG_DM_MULTIPATH=m
#
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
+CONFIG_BONDING=y
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
@@ -624,6 +634,7 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_N_HDLC is not set
# CONFIG_SPECIALIX is not set
# CONFIG_SX is not set
+# CONFIG_RIO is not set
# CONFIG_STALDRV is not set
#
@@ -766,6 +777,7 @@ CONFIG_I2C_ALGOBIT=y
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
@@ -1054,11 +1066,7 @@ CONFIG_DEBUGGER=y
# CONFIG_XMON is not set
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
-# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
-# CONFIG_PPC_EARLY_DEBUG_G5 is not set
-# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
-# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
-# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
#
# Security options
diff --git a/arch/powerpc/configs/mpc85xx_cds_defconfig b/arch/powerpc/configs/mpc85xx_cds_defconfig
new file mode 100644
index 000000000000..9bb022a523fe
--- /dev/null
+++ b/arch/powerpc/configs/mpc85xx_cds_defconfig
@@ -0,0 +1,846 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.16
+# Sun Apr 2 11:23:42 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+# CONFIG_CLASSIC32 is not set
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+CONFIG_PPC_85xx=y
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_85xx=y
+CONFIG_E500=y
+CONFIG_BOOKE=y
+CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
+CONFIG_SPE=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_MPIC=y
+# CONFIG_WANT_EARLY_SERIAL is not set
+
+#
+# Platform support
+#
+# CONFIG_MPC8540_ADS is not set
+CONFIG_MPC85xx_CDS=y
+CONFIG_MPC8540=y
+CONFIG_PPC_INDIRECT_PCI_BE=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+CONFIG_MATH_EMULATION=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_PPC_I8259=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_IDEDISK is not set
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+# CONFIG_IDEDMA_PCI_AUTO is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+CONFIG_BLK_DEV_VIA82CXXX=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_PCI is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+CONFIG_E1000=y
+CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+CONFIG_GFAR_NAPI=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_UNWIND_INFO is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/powerpc/configs/mpc8641_hpcn_defconfig b/arch/powerpc/configs/mpc8641_hpcn_defconfig
new file mode 100644
index 000000000000..d7a30f9bc535
--- /dev/null
+++ b/arch/powerpc/configs/mpc8641_hpcn_defconfig
@@ -0,0 +1,921 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.17-rc3
+# Fri Jun 16 10:47:09 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+CONFIG_GENERIC_TBSYNC=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+# CONFIG_CLASSIC32 is not set
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
+CONFIG_PPC_86xx=y
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+CONFIG_ALTIVEC=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+# CONFIG_SYSVIPC is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_CPUSETS is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+# CONFIG_SLAB is not set
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_SLOB=y
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+CONFIG_IOSCHED_DEADLINE=y
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+CONFIG_MPIC=y
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_PPC_INDIRECT_PCI_BE=y
+
+#
+# Platform Support
+#
+CONFIG_MPC8641_HPCN=y
+CONFIG_MPC8641=y
+
+#
+# Kernel options
+#
+CONFIG_HIGHMEM=y
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_250 is not set
+CONFIG_HZ_1000=y
+CONFIG_HZ=1000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_BKL=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_IRQ_ALL_CPUS is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_PPC_I8259=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+CONFIG_VITESSE_PHY=y
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_PCI is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+# CONFIG_GFAR_NAPI is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+# CONFIG_I2C_CHARDEV is not set
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+CONFIG_SENSORS_EEPROM=y
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=y
+# CONFIG_NFSD_V3 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_UNWIND_INFO is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 57a027971d67..addc79381c3b 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16-rc6
-# Wed Mar 15 16:21:32 2006
+# Linux kernel version: 2.6.17-rc5
+# Mon May 29 14:47:49 2006
#
# CONFIG_PPC64 is not set
CONFIG_PPC32=y
@@ -9,6 +9,7 @@ CONFIG_PPC_MERGE=y
CONFIG_MMU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
@@ -27,11 +28,11 @@ CONFIG_CLASSIC32=y
# CONFIG_PPC_52xx is not set
# CONFIG_PPC_82xx is not set
# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
# CONFIG_40x is not set
# CONFIG_44x is not set
# CONFIG_8xx is not set
# CONFIG_E200 is not set
-# CONFIG_E500 is not set
CONFIG_6xx=y
CONFIG_PPC_FPU=y
CONFIG_ALTIVEC=y
@@ -59,6 +60,7 @@ CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_EMBEDDED is not set
@@ -73,10 +75,6 @@ CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
@@ -88,7 +86,6 @@ CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
@@ -97,6 +94,8 @@ CONFIG_KMOD=y
# Block layer
#
CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_LSF=y
#
# IO Schedulers
@@ -124,6 +123,7 @@ CONFIG_MPIC=y
# CONFIG_PPC_RTAS is not set
# CONFIG_MMIO_NVRAM is not set
CONFIG_PPC_MPC106=y
+# CONFIG_PPC_970_NAP is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TABLE=y
# CONFIG_CPU_FREQ_DEBUG is not set
@@ -182,7 +182,6 @@ CONFIG_GENERIC_ISA_DMA=y
CONFIG_PPC_INDIRECT_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_DEBUG is not set
#
@@ -239,7 +238,9 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
@@ -250,9 +251,10 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
@@ -264,6 +266,8 @@ CONFIG_TCP_CONG_BIC=y
#
# CONFIG_IP_VS is not set
# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -278,12 +282,15 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_SCTP=m
@@ -305,15 +312,15 @@ CONFIG_IP_NF_NETBIOS_NS=m
CONFIG_IP_NF_TFTP=m
CONFIG_IP_NF_AMANDA=m
CONFIG_IP_NF_PPTP=m
+CONFIG_IP_NF_H323=m
# CONFIG_IP_NF_QUEUE is not set
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
CONFIG_IP_NF_MATCH_TOS=m
CONFIG_IP_NF_MATCH_RECENT=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_MATCH_OWNER=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
@@ -335,6 +342,7 @@ CONFIG_IP_NF_NAT_FTP=m
CONFIG_IP_NF_NAT_TFTP=m
CONFIG_IP_NF_NAT_AMANDA=m
CONFIG_IP_NF_NAT_PPTP=m
+CONFIG_IP_NF_NAT_H323=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_TOS=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -350,10 +358,12 @@ CONFIG_IP_NF_ARP_MANGLE=m
#
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
+CONFIG_IP_DCCP_ACKVEC=y
#
# DCCP CCIDs Configuration (EXPERIMENTAL)
#
+CONFIG_IP_DCCP_CCID2=m
CONFIG_IP_DCCP_CCID3=m
CONFIG_IP_DCCP_TFRC_LIB=m
@@ -361,7 +371,6 @@ CONFIG_IP_DCCP_TFRC_LIB=m
# DCCP Kernel Hacking
#
# CONFIG_IP_DCCP_DEBUG is not set
-# CONFIG_IP_DCCP_UNLOAD_HACK is not set
#
# SCTP Configuration (EXPERIMENTAL)
@@ -477,6 +486,8 @@ CONFIG_IEEE80211=m
CONFIG_IEEE80211_CRYPT_WEP=m
CONFIG_IEEE80211_CRYPT_CCMP=m
CONFIG_IEEE80211_CRYPT_TKIP=m
+# CONFIG_IEEE80211_SOFTMAC is not set
+CONFIG_WIRELESS_EXT=y
#
# Device Drivers
@@ -662,9 +673,8 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+CONFIG_SCSI_SYM53C8XX_MMIO=y
# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_QLA_FC is not set
# CONFIG_SCSI_LPFC is not set
@@ -694,16 +704,17 @@ CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
-# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID10=m
CONFIG_MD_RAID5=m
+CONFIG_MD_RAID5_RESHAPE=y
CONFIG_MD_RAID6=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
-# CONFIG_DM_SNAPSHOT is not set
-# CONFIG_DM_MIRROR is not set
-# CONFIG_DM_ZERO is not set
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
# CONFIG_DM_MULTIPATH is not set
#
@@ -740,7 +751,7 @@ CONFIG_IEEE1394_OHCI1394=m
CONFIG_IEEE1394_VIDEO1394=m
CONFIG_IEEE1394_SBP2=m
# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-CONFIG_IEEE1394_ETH1394=m
+# CONFIG_IEEE1394_ETH1394 is not set
CONFIG_IEEE1394_DV1394=m
CONFIG_IEEE1394_RAWIO=m
@@ -769,10 +780,10 @@ CONFIG_THERM_ADT746X=m
# Network device support
#
CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
+CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=m
#
# ARCnet devices
@@ -857,6 +868,7 @@ CONFIG_PCNET32=y
# Wireless LAN (non-hamradio)
#
CONFIG_NET_RADIO=y
+# CONFIG_NET_WIRELESS_RTNETLINK is not set
#
# Obsolete Wireless cards support (pre-802.11)
@@ -992,6 +1004,7 @@ CONFIG_HW_CONSOLE=y
# Serial drivers
#
CONFIG_SERIAL_8250=m
+CONFIG_SERIAL_8250_PCI=m
# CONFIG_SERIAL_8250_CS is not set
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
@@ -1027,6 +1040,7 @@ CONFIG_GEN_RTC=y
# Ftape, the floppy tape device driver
#
CONFIG_AGP=m
+# CONFIG_AGP_VIA is not set
CONFIG_AGP_UNINORTH=m
CONFIG_DRM=m
# CONFIG_DRM_TDFX is not set
@@ -1081,7 +1095,6 @@ CONFIG_I2C_POWERMAC=y
# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_PROSAVAGE is not set
# CONFIG_I2C_SAVAGE4 is not set
-# CONFIG_SCx200_ACB is not set
# CONFIG_I2C_SIS5595 is not set
# CONFIG_I2C_SIS630 is not set
# CONFIG_I2C_SIS96X is not set
@@ -1100,10 +1113,8 @@ CONFIG_I2C_POWERMAC=y
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_M41T00 is not set
# CONFIG_SENSORS_MAX6875 is not set
-# CONFIG_RTC_X1205_I2C is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -1131,18 +1142,16 @@ CONFIG_I2C_POWERMAC=y
#
#
-# Multimedia Capabilities Port drivers
-#
-
-#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
#
# CONFIG_DVB is not set
+# CONFIG_USB_DABUSB is not set
#
# Graphics support
@@ -1152,6 +1161,7 @@ CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
CONFIG_FB_MACMODES=y
+CONFIG_FB_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_CIRRUS is not set
@@ -1175,7 +1185,6 @@ CONFIG_FB_MATROX_MYSTIQUE=y
# CONFIG_FB_MATROX_G is not set
# CONFIG_FB_MATROX_I2C is not set
# CONFIG_FB_MATROX_MULTIHEAD is not set
-# CONFIG_FB_RADEON_OLD is not set
CONFIG_FB_RADEON=y
CONFIG_FB_RADEON_I2C=y
# CONFIG_FB_RADEON_DEBUG is not set
@@ -1234,9 +1243,11 @@ CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
@@ -1253,6 +1264,7 @@ CONFIG_SND_DUMMY=m
# PCI devices
#
# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
# CONFIG_SND_ALS4000 is not set
# CONFIG_SND_ALI5451 is not set
# CONFIG_SND_ATIIXP is not set
@@ -1285,6 +1297,7 @@ CONFIG_SND_DUMMY=m
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
# CONFIG_SND_RME32 is not set
# CONFIG_SND_RME96 is not set
# CONFIG_SND_RME9652 is not set
@@ -1310,6 +1323,8 @@ CONFIG_SND_USB_AUDIO=m
#
# PCMCIA devices
#
+# CONFIG_SND_VXPOCKET is not set
+# CONFIG_SND_PDAUDIOCF is not set
#
# Open Sound System
@@ -1321,6 +1336,7 @@ CONFIG_SND_USB_AUDIO=m
#
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set
@@ -1336,7 +1352,9 @@ CONFIG_USB_DYNAMIC_MINORS=y
#
# USB Host Controller Drivers
#
-# CONFIG_USB_EHCI_HCD is not set
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_SPLIT_ISO=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
@@ -1347,7 +1365,6 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
#
# USB Device Class drivers
#
-# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
@@ -1358,7 +1375,17 @@ CONFIG_USB_PRINTER=m
#
# may also be needed; see USB_STORAGE Help for more information
#
-# CONFIG_USB_STORAGE is not set
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
# CONFIG_USB_LIBUSUAL is not set
#
@@ -1374,9 +1401,7 @@ CONFIG_USB_HIDINPUT_POWERBOOK=y
# CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_ITMTOUCH is not set
-# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_TOUCHSCREEN is not set
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
@@ -1391,15 +1416,6 @@ CONFIG_USB_APPLETOUCH=y
# CONFIG_USB_MICROTEK is not set
#
-# USB Multimedia devices
-#
-# CONFIG_USB_DABUSB is not set
-
-#
-# Video4Linux support is needed for USB Multimedia device support
-#
-
-#
# USB Network Adapters
#
# CONFIG_USB_CATC is not set
@@ -1429,6 +1445,7 @@ CONFIG_USB_SERIAL=m
# CONFIG_USB_SERIAL_GENERIC is not set
# CONFIG_USB_SERIAL_AIRPRIME is not set
# CONFIG_USB_SERIAL_ANYDATA is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
# CONFIG_USB_SERIAL_BELKIN is not set
# CONFIG_USB_SERIAL_WHITEHEAT is not set
# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
@@ -1436,6 +1453,7 @@ CONFIG_USB_SERIAL=m
# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
# CONFIG_USB_SERIAL_EMPEG is not set
# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
CONFIG_USB_SERIAL_VISOR=m
CONFIG_USB_SERIAL_IPAQ=m
# CONFIG_USB_SERIAL_IR is not set
@@ -1460,6 +1478,7 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
# CONFIG_USB_SERIAL_KLSI is not set
# CONFIG_USB_SERIAL_KOBIL_SCT is not set
# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
# CONFIG_USB_SERIAL_PL2303 is not set
# CONFIG_USB_SERIAL_HP4X is not set
# CONFIG_USB_SERIAL_SAFE is not set
@@ -1484,6 +1503,7 @@ CONFIG_USB_EZUSB=y
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
# CONFIG_USB_TEST is not set
@@ -1502,6 +1522,19 @@ CONFIG_USB_EZUSB=y
# CONFIG_MMC is not set
#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
# InfiniBand support
#
# CONFIG_INFINIBAND is not set
@@ -1511,6 +1544,11 @@ CONFIG_USB_EZUSB=y
#
#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
@@ -1518,14 +1556,14 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
# CONFIG_EXT3_FS_SECURITY is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
@@ -1534,7 +1572,7 @@ CONFIG_INOTIFY=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
+CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
#
@@ -1566,7 +1604,6 @@ CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
-CONFIG_RELAYFS_FS=m
# CONFIG_CONFIGFS_FS is not set
#
@@ -1590,17 +1627,24 @@ CONFIG_HFSPLUS_FS=m
# Network File Systems
#
CONFIG_NFS_FS=y
-# CONFIG_NFS_V3 is not set
-# CONFIG_NFS_V4 is not set
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
# CONFIG_NFS_DIRECTIO is not set
-CONFIG_NFSD=y
-# CONFIG_NFSD_V3 is not set
-# CONFIG_NFSD_TCP is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
CONFIG_LOCKD=y
-CONFIG_EXPORTFS=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_ACL_SUPPORT=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
# CONFIG_SMB_NLS_DEFAULT is not set
@@ -1681,7 +1725,7 @@ CONFIG_NLS_UTF8=m
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
+CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_TEXTSEARCH=y
@@ -1735,29 +1779,29 @@ CONFIG_BOOTX_TEXT=y
# Cryptographic options
#
CONFIG_CRYPTO=y
-# CONFIG_CRYPTO_HMAC is not set
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-# CONFIG_CRYPTO_MD5 is not set
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
-# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_DES is not set
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_AES=m
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_DEFLATE is not set
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_CRC32C=m
# CONFIG_CRYPTO_TEST is not set
#
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 58e68ce09b0f..31708ad4574e 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17-rc1
-# Wed Apr 19 11:48:00 2006
+# Linux kernel version: 2.6.17-rc4
+# Sun May 28 07:26:56 2006
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
@@ -126,8 +127,9 @@ CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
# CONFIG_MMIO_NVRAM is not set
CONFIG_IBMVIO=y
-# CONFIG_IBMEBUS is not set
+CONFIG_IBMEBUS=y
# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
# CONFIG_CPU_FREQ is not set
# CONFIG_WANT_EARLY_SERIAL is not set
@@ -143,7 +145,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
+CONFIG_BINFMT_MISC=m
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_IOMMU_VMERGE=y
CONFIG_HOTPLUG_CPU=y
@@ -155,6 +157,7 @@ CONFIG_EEH=y
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=4
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
@@ -467,7 +470,7 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
-# CONFIG_SCSI_SAS_ATTRS is not set
+CONFIG_SCSI_SAS_ATTRS=m
#
# SCSI low-level drivers
@@ -499,13 +502,18 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+CONFIG_SCSI_SYM53C8XX_MMIO=y
CONFIG_SCSI_IPR=y
CONFIG_SCSI_IPR_TRACE=y
CONFIG_SCSI_IPR_DUMP=y
-# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_QLA_FC is not set
+CONFIG_SCSI_QLA_FC=m
+CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE=y
+CONFIG_SCSI_QLA21XX=m
+CONFIG_SCSI_QLA22XX=m
+CONFIG_SCSI_QLA2300=m
+CONFIG_SCSI_QLA2322=m
+CONFIG_SCSI_QLA24XX=m
CONFIG_SCSI_LPFC=m
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
@@ -521,7 +529,7 @@ CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_MD_RAID10=m
CONFIG_MD_RAID5=y
-# CONFIG_MD_RAID5_RESHAPE is not set
+CONFIG_MD_RAID5_RESHAPE=y
CONFIG_MD_RAID6=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
@@ -764,7 +772,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_ICOM=m
-# CONFIG_SERIAL_JSM is not set
+CONFIG_SERIAL_JSM=m
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -773,7 +781,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_TIPAR is not set
CONFIG_HVC_DRIVER=y
CONFIG_HVC_CONSOLE=y
-# CONFIG_HVC_RTAS is not set
+CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
#
@@ -1031,9 +1039,7 @@ CONFIG_USB_HIDDEV=y
# CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_ITMTOUCH is not set
-# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_TOUCHSCREEN is not set
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
@@ -1105,16 +1111,25 @@ CONFIG_USB_MON=y
# CONFIG_NEW_LEDS is not set
#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
# InfiniBand support
#
CONFIG_INFINIBAND=m
-# CONFIG_INFINIBAND_USER_MAD is not set
-# CONFIG_INFINIBAND_USER_ACCESS is not set
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
+CONFIG_INFINIBAND_MTHCA_DEBUG=y
CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
-# CONFIG_INFINIBAND_SRP is not set
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
+CONFIG_INFINIBAND_SRP=m
#
# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
@@ -1159,15 +1174,15 @@ CONFIG_XFS_EXPORT=y
CONFIG_XFS_SECURITY=y
CONFIG_XFS_POSIX_ACL=y
# CONFIG_XFS_RT is not set
-# CONFIG_OCFS2_FS is not set
+CONFIG_OCFS2_FS=m
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=m
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
#
# CD-ROM/DVD Filesystems
@@ -1199,7 +1214,7 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=m
#
# Miscellaneous filesystems
@@ -1317,7 +1332,7 @@ CONFIG_ZLIB_DEFLATE=m
#
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
-# CONFIG_KPROBES is not set
+CONFIG_KPROBES=y
#
# Kernel hacking
@@ -1329,7 +1344,7 @@ CONFIG_LOG_BUF_SHIFT=17
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -1339,17 +1354,13 @@ CONFIG_DEBUG_FS=y
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUGGER=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
-# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
-# CONFIG_PPC_EARLY_DEBUG_G5 is not set
-# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
-# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
-# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
#
# Security options
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index faaec9c6f78f..4734b5de599d 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -35,17 +35,19 @@ struct aligninfo {
#define INVALID { 0, 0 }
-#define LD 1 /* load */
-#define ST 2 /* store */
-#define SE 4 /* sign-extend value */
-#define F 8 /* to/from fp regs */
-#define U 0x10 /* update index register */
-#define M 0x20 /* multiple load/store */
-#define SW 0x40 /* byte swap int or ... */
-#define S 0x40 /* ... single-precision fp */
-#define SX 0x40 /* byte count in XER */
+/* Bits in the flags field */
+#define LD 0 /* load */
+#define ST 1 /* store */
+#define SE 2 /* sign-extend value */
+#define F 4 /* to/from fp regs */
+#define U 8 /* update index register */
+#define M 0x10 /* multiple load/store */
+#define SW 0x20 /* byte swap */
+#define S 0x40 /* single-precision fp or... */
+#define SX 0x40 /* ... byte count in XER */
#define HARD 0x80 /* string, stwcx. */
+/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
@@ -256,12 +258,16 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
+#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
- unsigned int flags, unsigned int instr)
+ unsigned int flags, unsigned int instr,
+ unsigned long swiz)
{
unsigned long *rptr;
- unsigned int nb0, i;
+ unsigned int nb0, i, bswiz;
+ unsigned long p;
/*
* We do not try to emulate 8 bytes multiple as they aren't really
@@ -280,9 +286,12 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
if (nb == 0)
return 1;
} else {
- if (__get_user(instr,
- (unsigned int __user *)regs->nip))
+ unsigned long pc = regs->nip ^ (swiz & 4);
+
+ if (__get_user(instr, (unsigned int __user *)pc))
return -EFAULT;
+ if (swiz == 0 && (flags & SW))
+ instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
if (nb == 0)
nb = 32;
@@ -300,7 +309,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
return -EFAULT; /* bad address */
rptr = &regs->gpr[reg];
- if (flags & LD) {
+ p = (unsigned long) addr;
+ bswiz = (flags & SW)? 3: 0;
+
+ if (!(flags & ST)) {
/*
* This zeroes the top 4 bytes of the affected registers
* in 64-bit mode, and also zeroes out any remaining
@@ -311,26 +323,28 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
memset(&regs->gpr[0], 0,
((nb0 + 3) / 4) * sizeof(unsigned long));
- for (i = 0; i < nb; ++i)
- if (__get_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb; ++i, ++p)
+ if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
- for (i = 0; i < nb0; ++i)
- if (__get_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb0; ++i, ++p)
+ if (__get_user(REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
} else {
- for (i = 0; i < nb; ++i)
- if (__put_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb; ++i, ++p)
+ if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
- for (i = 0; i < nb0; ++i)
- if (__put_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb0; ++i, ++p)
+ if (__put_user(REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
}
@@ -352,7 +366,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int reg, areg;
unsigned int dsisr;
unsigned char __user *addr;
- unsigned char __user *p;
+ unsigned long p, swiz;
int ret, t;
union {
u64 ll;
@@ -380,11 +394,15 @@ int fix_alignment(struct pt_regs *regs)
* let's make one up from the instruction
*/
if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
- unsigned int real_instr;
- if (unlikely(__get_user(real_instr,
- (unsigned int __user *)regs->nip)))
+ unsigned long pc = regs->nip;
+
+ if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
+ pc ^= 4;
+ if (unlikely(__get_user(instr, (unsigned int __user *)pc)))
return -EFAULT;
- dsisr = make_dsisr(real_instr);
+ if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
+ instr = cpu_to_le32(instr);
+ dsisr = make_dsisr(instr);
}
/* extract the operation and registers from the dsisr */
@@ -397,6 +415,24 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
+ /* Byteswap little endian loads and stores */
+ swiz = 0;
+ if (regs->msr & MSR_LE) {
+ flags ^= SW;
+ /*
+ * So-called "PowerPC little endian" mode works by
+ * swizzling addresses rather than by actually doing
+ * any byte-swapping. To emulate this, we XOR each
+ * byte address with 7. We also byte-swap, because
+ * the processor's address swizzling depends on the
+ * operand size (it xors the address with 7 for bytes,
+ * 6 for halfwords, 4 for words, 0 for doublewords) but
+ * we will xor with 7 and load/store each byte separately.
+ */
+ if (cpu_has_feature(CPU_FTR_PPC_LE))
+ swiz = 7;
+ }
+
/* DAR has the operand effective address */
addr = (unsigned char __user *)regs->dar;
@@ -412,7 +448,8 @@ int fix_alignment(struct pt_regs *regs)
* function
*/
if (flags & M)
- return emulate_multiple(regs, addr, reg, nb, flags, instr);
+ return emulate_multiple(regs, addr, reg, nb,
+ flags, instr, swiz);
/* Verify the address of the operand */
if (unlikely(user_mode(regs) &&
@@ -431,51 +468,71 @@ int fix_alignment(struct pt_regs *regs)
/* If we are loading, get the data from user space, else
* get it from register values
*/
- if (flags & LD) {
+ if (!(flags & ST)) {
data.ll = 0;
ret = 0;
- p = addr;
+ p = (unsigned long) addr;
switch (nb) {
case 8:
- ret |= __get_user(data.v[0], p++);
- ret |= __get_user(data.v[1], p++);
- ret |= __get_user(data.v[2], p++);
- ret |= __get_user(data.v[3], p++);
+ ret |= __get_user(data.v[0], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[1], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[2], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __get_user(data.v[4], p++);
- ret |= __get_user(data.v[5], p++);
+ ret |= __get_user(data.v[4], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[5], SWIZ_PTR(p++));
case 2:
- ret |= __get_user(data.v[6], p++);
- ret |= __get_user(data.v[7], p++);
+ ret |= __get_user(data.v[6], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[7], SWIZ_PTR(p++));
if (unlikely(ret))
return -EFAULT;
}
- } else if (flags & F)
+ } else if (flags & F) {
data.dd = current->thread.fpr[reg];
- else
+ if (flags & S) {
+ /* Single-precision FP store requires conversion... */
+#ifdef CONFIG_PPC_FPU
+ preempt_disable();
+ enable_kernel_fp();
+ cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
+ preempt_enable();
+#else
+ return 0;
+#endif
+ }
+ } else
data.ll = regs->gpr[reg];
- /* Perform other misc operations like sign extension, byteswap,
+ if (flags & SW) {
+ switch (nb) {
+ case 8:
+ SWAP(data.v[0], data.v[7]);
+ SWAP(data.v[1], data.v[6]);
+ SWAP(data.v[2], data.v[5]);
+ SWAP(data.v[3], data.v[4]);
+ break;
+ case 4:
+ SWAP(data.v[4], data.v[7]);
+ SWAP(data.v[5], data.v[6]);
+ break;
+ case 2:
+ SWAP(data.v[6], data.v[7]);
+ break;
+ }
+ }
+
+ /* Perform other misc operations like sign extension
* or floating point single precision conversion
*/
- switch (flags & ~U) {
+ switch (flags & ~(U|SW)) {
case LD+SE: /* sign extend */
if ( nb == 2 )
data.ll = data.x16.low16;
else /* nb must be 4 */
data.ll = data.x32.low32;
break;
- case LD+S: /* byte-swap */
- case ST+S:
- if (nb == 2) {
- SWAP(data.v[6], data.v[7]);
- } else {
- SWAP(data.v[4], data.v[7]);
- SWAP(data.v[5], data.v[6]);
- }
- break;
- /* Single-precision FP load and store require conversions... */
+ /* Single-precision FP load requires conversion... */
case LD+F+S:
#ifdef CONFIG_PPC_FPU
preempt_disable();
@@ -486,34 +543,24 @@ int fix_alignment(struct pt_regs *regs)
return 0;
#endif
break;
- case ST+F+S:
-#ifdef CONFIG_PPC_FPU
- preempt_disable();
- enable_kernel_fp();
- cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
- preempt_enable();
-#else
- return 0;
-#endif
- break;
}
/* Store result to memory or update registers */
if (flags & ST) {
ret = 0;
- p = addr;
+ p = (unsigned long) addr;
switch (nb) {
case 8:
- ret |= __put_user(data.v[0], p++);
- ret |= __put_user(data.v[1], p++);
- ret |= __put_user(data.v[2], p++);
- ret |= __put_user(data.v[3], p++);
+ ret |= __put_user(data.v[0], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[1], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[2], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __put_user(data.v[4], p++);
- ret |= __put_user(data.v[5], p++);
+ ret |= __put_user(data.v[4], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[5], SWIZ_PTR(p++));
case 2:
- ret |= __put_user(data.v[6], p++);
- ret |= __put_user(data.v[7], p++);
+ ret |= __put_user(data.v[6], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[7], SWIZ_PTR(p++));
}
if (unlikely(ret))
return -EFAULT;
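
The comment added in the hunk above captures the key identity: true PPC "little-endian" mode swizzles the low address bits by operand size (7 for bytes, 6 for halfwords, 4 for words, 0 for doublewords), whereas the alignment fixup XORs every byte address with 7 and byte-swaps the assembled value. The following standalone C sketch illustrates that equivalence for an aligned word access; it is illustrative only and not part of the patch (the helper names and the toy buffer are invented for the demonstration):

/* Build with: cc -std=c99 -Wall swiz_demo.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assemble an nb-byte value MSB-first, fetching each byte at
 * (ea + i) ^ swiz -- the per-byte address munging the fixup performs. */
static uint64_t swizzled_load(const uint8_t *mem, unsigned long ea,
			      unsigned int nb, unsigned long swiz)
{
	uint64_t val = 0;
	for (unsigned int i = 0; i < nb; i++)
		val = (val << 8) | mem[(ea + i) ^ swiz];
	return val;
}

static uint32_t swap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x & 0xff00) << 8) | (x << 24);
}

int main(void)
{
	uint8_t mem[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };

	/* What the hardware swizzle does for an aligned word: XOR with 4. */
	uint32_t hw = (uint32_t)swizzled_load(mem, 4, 4, 4);

	/* What the fixup does: XOR each byte address with 7, then swap.   */
	uint32_t emu = swap32((uint32_t)swizzled_load(mem, 4, 4, 7));

	printf("hw=%08x emu=%08x\n", (unsigned)hw, (unsigned)emu);
	assert(hw == emu);	/* both paths yield 0x00112233 */
	return 0;
}

Fetching through ^7 returns the same four bytes as ^4 but in reverse order, which is exactly what the extra SW byte swap in the fixup undoes.
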
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8f85c5e8a55a..ff2940548929 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,9 +122,8 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-#ifdef CONFIG_PPC_64K_PAGES
- DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
-#endif
+ DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+ DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55ed7716636f..365381fcb27c 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -210,9 +210,11 @@ setup_745x_specifics:
* the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier
*/
+BEGIN_FTR_SECTION
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
+END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
lwz r6,CPU_SPEC_FEATURES(r5)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
index b61d86e7ceb6..271418308d53 100644
--- a/arch/powerpc/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -73,23 +73,6 @@ _GLOBAL(__970_cpu_preinit)
isync
blr
-_GLOBAL(__setup_cpu_power4)
- blr
-
-_GLOBAL(__setup_cpu_be)
- /* Set large page sizes LP=0: 16MB, LP=1: 64KB */
- addi r3, 0, 0
- ori r3, r3, HID6_LB
- sldi r3, r3, 32
- nor r3, r3, r3
- mfspr r4, SPRN_HID6
- and r4, r4, r3
- addi r3, 0, 0x02000
- sldi r3, r3, 32
- or r4, r4, r3
- mtspr SPRN_HID6, r4
- blr
-
_GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3f7182db9ed5..1c114880dc05 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -30,11 +30,7 @@ EXPORT_SYMBOL(cur_cpu_spec);
* part of the cputable though. That has to be fixed for both ppc32
* and ppc64
*/
-#ifdef CONFIG_PPC64
-extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
-#else
+#ifdef CONFIG_PPC32
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -58,7 +54,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+ PPC_FEATURE_TRUE_LE)
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE)
@@ -78,11 +75,10 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00400000,
.cpu_name = "POWER3 (630)",
.cpu_features = CPU_FTRS_POWER3,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "power3",
@@ -92,11 +88,10 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00410000,
.cpu_name = "POWER3 (630+)",
.cpu_features = CPU_FTRS_POWER3,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "power3",
@@ -110,7 +105,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -124,7 +118,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -138,7 +131,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -152,7 +144,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -166,7 +157,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power4",
@@ -180,7 +170,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power4",
@@ -200,17 +189,11 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
-#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
{ /* PPC970FX */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003c0000,
.cpu_name = "PPC970FX",
-#ifdef CONFIG_PPC32
- .cpu_features = CPU_FTRS_970_32,
-#else
.cpu_features = CPU_FTRS_PPC970,
-#endif
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
@@ -221,8 +204,6 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
-#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
-#ifdef CONFIG_PPC64
{ /* PPC970MP */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00440000,
@@ -232,6 +213,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
+ .num_pmcs = 8,
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
@@ -246,9 +228,13 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power5",
.oprofile_type = PPC_OPROFILE_POWER4,
+ /* SIHV / SIPR bits are implemented on POWER4+ (GQ)
+ * and above but only works on POWER5 and above
+ */
+ .oprofile_mmcra_sihv = MMCRA_SIHV,
+ .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5",
},
{ /* Power5 GS */
@@ -260,9 +246,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power5+",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = MMCRA_SIHV,
+ .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
{ /* Power6 */
@@ -273,10 +260,13 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
+ .num_pmcs = 8,
.oprofile_cpu_type = "ppc64/power6",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
+ .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
+ .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
+ POWER6_MMCRA_OTHER,
.platform = "power6",
},
{ /* Cell Broadband Engine */
@@ -289,7 +279,6 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_SMT,
.icache_bsize = 128,
.dcache_bsize = 128,
- .cpu_setup = __setup_cpu_be,
.platform = "ppc-cell-be",
},
{ /* default match */
@@ -301,7 +290,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.platform = "power4",
}
#endif /* CONFIG_PPC64 */
@@ -323,7 +311,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00030000,
.cpu_name = "603",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -334,7 +322,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00060000,
.cpu_name = "603e",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -345,7 +333,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00070000,
.cpu_name = "603ev",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -356,7 +344,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00040000,
.cpu_name = "604",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 2,
@@ -368,7 +356,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00090000,
.cpu_name = "604e",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -380,7 +368,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00090000,
.cpu_name = "604r",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -392,7 +380,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000a0000,
.cpu_name = "604ev",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -404,7 +392,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00084202,
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740_NOTAU,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -416,7 +404,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00080100,
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -428,7 +416,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00082200,
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -440,7 +428,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00082210,
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -452,7 +440,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00083214,
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -464,7 +452,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00083000,
.cpu_name = "745/755",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -476,7 +464,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000100,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX1,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -488,7 +476,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000200,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX2,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -500,7 +488,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000000,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -512,7 +500,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70020000,
.cpu_name = "750GX",
.cpu_features = CPU_FTRS_750GX,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -524,7 +512,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00080000,
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -536,7 +524,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000c1101,
.cpu_name = "7400 (1.1)",
.cpu_features = CPU_FTRS_7400_NOTAU,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -548,7 +537,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000c0000,
.cpu_name = "7400",
.cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -560,7 +550,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x800c0000,
.cpu_name = "7410",
.cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -572,7 +563,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000200,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_20,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -586,7 +578,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000201,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_21,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -600,7 +593,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000000,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_23,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -614,7 +608,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010100,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455_1,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -628,7 +623,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010200,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455_20,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -642,7 +638,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010000,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -656,7 +653,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020100,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -670,7 +668,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020101,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -684,7 +683,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020000,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -698,7 +697,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80030000,
.cpu_name = "7447A",
.cpu_features = CPU_FTRS_7447A,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -712,7 +712,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80040000,
.cpu_name = "7448",
.cpu_features = CPU_FTRS_7447A,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -721,6 +722,18 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_G4,
.platform = "ppc7450",
},
+ { /* 8641 */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x80040010,
+ .cpu_name = "8641",
+ .cpu_features = CPU_FTRS_7447A,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .cpu_setup = __setup_cpu_745x
+ },
+
{ /* 82xx (8240, 8245, 8260 are all 603e cores) */
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00810000,
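The cputable hunks above drop the empty __setup_cpu_power3/__setup_cpu_power4 hooks and instead describe each processor's MMCRA sampling bits (oprofile_mmcra_sihv, oprofile_mmcra_sipr, oprofile_mmcra_clear) in cpu_spec, so the profiling code can test the right bits per CPU instead of hard-coding MMCRA_SIHV/MMCRA_SIPR. A minimal sketch of how a POWER4-family oprofile backend could consume the new fields, assuming the usual cur_cpu_spec pointer; the helper names and the exact meaning attached to a set SIPR bit are illustrative, not taken from this patch:

#include <asm/cputable.h>

/* Sketch only: classify a sample from its saved MMCRA value using the
 * per-CPU masks added above instead of hard-coded constants. */
static int sketch_sample_in_hypervisor(unsigned long mmcra)
{
        return (mmcra & cur_cpu_spec->oprofile_mmcra_sihv) != 0;
}

static int sketch_sample_in_problem_state(unsigned long mmcra)
{
        return (mmcra & cur_cpu_spec->oprofile_mmcra_sipr) != 0;
}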
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 778f22fd85d2..dbcb85994f46 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -22,6 +22,7 @@
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/types.h>
#include <asm/processor.h>
@@ -174,6 +175,8 @@ static void crash_kexec_prepare_cpus(void)
void default_machine_crash_shutdown(struct pt_regs *regs)
{
+ unsigned int irq;
+
/*
* This function is only called after the system
* has panicked or is otherwise in a critical state.
@@ -186,6 +189,16 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/
local_irq_disable();
+ for_each_irq(irq) {
+ struct irq_desc *desc = irq_descp(irq);
+
+ if (desc->status & IRQ_INPROGRESS)
+ desc->handler->end(irq);
+
+ if (!(desc->status & IRQ_DISABLED))
+ desc->handler->disable(irq);
+ }
+
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 764d07329716..371973be8d71 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -25,6 +25,11 @@
#define DBG(fmt...)
#endif
+void reserve_kdump_trampoline(void)
+{
+ lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+}
+
static void __init create_trampoline(unsigned long addr)
{
/* The maximum range of a single instruction branch is the current
@@ -39,11 +44,11 @@ static void __init create_trampoline(unsigned long addr)
create_branch(addr + 4, addr + PHYSICAL_START, 0);
}
-void __init kdump_setup(void)
+void __init setup_kdump_trampoline(void)
{
unsigned long i;
- DBG(" -> kdump_setup()\n");
+ DBG(" -> setup_kdump_trampoline()\n");
for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
create_trampoline(i);
@@ -52,7 +57,7 @@ void __init kdump_setup(void)
create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
- DBG(" <- kdump_setup()\n");
+ DBG(" <- setup_kdump_trampoline()\n");
}
#ifdef CONFIG_PROC_VMCORE
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 19ad5c6b1818..221062c960c9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -57,6 +57,7 @@ system_call_common:
beq- 1f
ld r1,PACAKSAVE(r13)
1: std r10,0(r1)
+ crclr so
std r11,_NIP(r1)
std r12,_MSR(r1)
std r0,GPR0(r1)
@@ -75,7 +76,6 @@ system_call_common:
std r11,GPR11(r1)
std r11,GPR12(r1)
std r9,GPR13(r1)
- crclr so
mfcr r9
mflr r10
li r11,0xc01
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 340730fb8c91..01f71200c603 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -72,7 +72,7 @@ _GLOBAL(load_up_fpu)
std r12,_MSR(r1)
#endif
lfd fr0,THREAD_FPSCR(r5)
- mtfsf 0xff,fr0
+ MTFSF_L(fr0)
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
@@ -127,7 +127,7 @@ _GLOBAL(giveup_fpu)
_GLOBAL(cvt_fd)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
- mtfsf 0xff,0
+ MTFSF_L(0)
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0
@@ -136,7 +136,7 @@ _GLOBAL(cvt_fd)
_GLOBAL(cvt_df)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
- mtfsf 0xff,0
+ MTFSF_L(0)
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index a0579e859b21..b25b25902d15 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -973,6 +973,13 @@ __secondary_start_gemini:
b __secondary_start
#endif /* CONFIG_GEMINI */
+ .globl __secondary_start_mpc86xx
+__secondary_start_mpc86xx:
+ mfspr r3, SPRN_PIR
+ stw r3, __secondary_hold_acknowledge@l(0)
+ mr r24, r3 /* cpu # */
+ b __secondary_start
+
.globl __secondary_start_pmac_0
__secondary_start_pmac_0:
/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
@@ -1088,7 +1095,12 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
-
+BEGIN_FTR_SECTION
+ LOAD_BAT(4,r3,r4,r5)
+ LOAD_BAT(5,r3,r4,r5)
+ LOAD_BAT(6,r3,r4,r5)
+ LOAD_BAT(7,r3,r4,r5)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
blr
/*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b7d140430a41..831acbdf2592 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -316,6 +316,21 @@ label##_pSeries: \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+#define HSTD_EXCEPTION_PSERIES(n, label) \
+ . = n; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM; \
+ mtspr SPRN_SPRG1,r20; /* save r20 */ \
+ mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
+ mtspr SPRN_SRR0,r20; \
+ mfspr r20,SPRN_HSRR1; /* copy HSRR1 to SRR1 */ \
+ mtspr SPRN_SRR1,r20; \
+ mfspr r20,SPRN_SPRG1; /* restore r20 */ \
+ mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+
+
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_iSeries; \
label##_iSeries: \
@@ -544,8 +559,17 @@ system_call_pSeries:
STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+#endif /* CONFIG_CBE_RAS */
. = 0x3000
@@ -827,6 +851,11 @@ machine_check_common:
#else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
+#ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
+ STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
+ STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
/*
* Here we have detected that the kernel stack pointer is bad.
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index fd8214caedee..a13a93dfc655 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -106,8 +106,6 @@ EXPORT_SYMBOL(iowrite32_rep);
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
- if (!_IO_IS_VALID(port))
- return NULL;
return (void __iomem *) (port+pci_io_base);
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 4eba60a32890..7cb77c20fc5d 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
* Build an iommu_table structure. This contains a bitmap which
* is used to manage allocation of the tce space.
*/
-struct iommu_table *iommu_init_table(struct iommu_table *tbl)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
unsigned long sz;
static int welcomed = 0;
+ struct page *page;
/* Set aside 1/4 of the table for large allocations. */
tbl->it_halfpoint = tbl->it_size * 3 / 4;
@@ -429,10 +430,10 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
/* number of bytes needed for the bitmap */
sz = (tbl->it_size + 7) >> 3;
- tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
- if (!tbl->it_map)
+ page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+ if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
-
+ tbl->it_map = page_address(page);
memset(tbl->it_map, 0, sz);
tbl->it_hint = 0;
@@ -536,11 +537,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
+ dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
unsigned int npages, order;
+ struct page *page;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
@@ -560,9 +562,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
return NULL;
/* Alloc enough pages (and possibly more) */
- ret = (void *)__get_free_pages(flag, order);
- if (!ret)
+ page = alloc_pages_node(node, flag, order);
+ if (!page)
return NULL;
+ ret = page_address(page);
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
@@ -570,9 +573,9 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
mask >> PAGE_SHIFT, order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
- ret = NULL;
- } else
- *dma_handle = mapping;
+ return NULL;
+ }
+ *dma_handle = mapping;
return ret;
}
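With iommu_init_table() and iommu_alloc_coherent() now taking a node argument and allocating through alloc_pages_node(), callers are expected to pass the NUMA node of the owning bridge (the pci_iommu.c hunk later in this diff passes pcibus_to_node(to_pci_dev(hwdev)->bus)), so the TCE bitmap and coherent DMA buffers land in memory local to the PHB instead of wherever __get_free_pages() happened to place them.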
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d560c68897..40d4c14fde8f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -47,6 +47,7 @@
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
+#include <linux/pci.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -379,8 +380,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_IRQSTACKS
-struct thread_info *softirq_ctx[NR_CPUS];
-struct thread_info *hardirq_ctx[NR_CPUS];
+struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
+struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
void irq_ctx_init(void)
{
@@ -436,6 +437,30 @@ void do_softirq(void)
}
EXPORT_SYMBOL(do_softirq);
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msi(struct pci_dev * pdev)
+{
+ if (ppc_md.enable_msi)
+ return ppc_md.enable_msi(pdev);
+ else
+ return -1;
+}
+
+void pci_disable_msi(struct pci_dev * pdev)
+{
+ if (ppc_md.disable_msi)
+ ppc_md.disable_msi(pdev);
+}
+
+void pci_scan_msi_device(struct pci_dev *dev) {}
+int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
+void pci_disable_msix(struct pci_dev *dev) {}
+void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
+void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
+void pci_no_msi(void) {}
+
+#endif
+
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
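The new pci_enable_msi()/pci_disable_msi() wrappers simply defer to optional ppc_md hooks, making MSI support a per-platform decision while the remaining generic entry points are stubbed out. A hedged sketch of how a platform might wire itself up; the platform function names are hypothetical and the interrupt-controller programming is elided:

#include <linux/init.h>
#include <linux/pci.h>
#include <asm/machdep.h>

/* Hypothetical platform hook: program the interrupt controller for
 * message-signalled delivery; return 0 on success or a negative errno. */
static int myplat_enable_msi(struct pci_dev *pdev)
{
        /* ... platform-specific MSI setup ... */
        return 0;
}

static void myplat_disable_msi(struct pci_dev *pdev)
{
        /* ... undo the setup ... */
}

static void __init myplat_init_msi(void)
{
        ppc_md.enable_msi  = myplat_enable_msi;
        ppc_md.disable_msi = myplat_disable_msi;
}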
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 2cbde865d4f5..c02deaab26c7 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -521,10 +521,10 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
current_weight = (resource >> 5 * 8) & 0xFF;
- pr_debug("%s: current_entitled = %lu, current_weight = %lu\n",
+ pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__FUNCTION__, current_entitled, current_weight);
- pr_debug("%s: new_entitled = %lu, new_weight = %lu\n",
+ pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
__FUNCTION__, *new_entitled_ptr, *new_weight_ptr);
retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index ee166c586642..a8fa04ef27cd 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -21,6 +21,7 @@
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
+#include <asm/lmb.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
#include <asm/prom.h>
@@ -335,7 +336,105 @@ static void __init export_htab_values(void)
of_node_put(node);
}
+static struct property crashk_base_prop = {
+ .name = "linux,crashkernel-base",
+ .length = sizeof(unsigned long),
+ .value = (unsigned char *)&crashk_res.start,
+};
+
+static unsigned long crashk_size;
+
+static struct property crashk_size_prop = {
+ .name = "linux,crashkernel-size",
+ .length = sizeof(unsigned long),
+ .value = (unsigned char *)&crashk_size,
+};
+
+static void __init export_crashk_values(void)
+{
+ struct device_node *node;
+ struct property *prop;
+
+ node = of_find_node_by_path("/chosen");
+ if (!node)
+ return;
+
+ /* There might be existing crash kernel properties, but we can't
+ * be sure what's in them, so remove them. */
+ prop = of_find_property(node, "linux,crashkernel-base", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ prop = of_find_property(node, "linux,crashkernel-size", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ if (crashk_res.start != 0) {
+ prom_add_property(node, &crashk_base_prop);
+ crashk_size = crashk_res.end - crashk_res.start + 1;
+ prom_add_property(node, &crashk_size_prop);
+ }
+
+ of_node_put(node);
+}
+
void __init kexec_setup(void)
{
export_htab_values();
+ export_crashk_values();
+}
+
+static int __init early_parse_crashk(char *p)
+{
+ unsigned long size;
+
+ if (!p)
+ return 1;
+
+ size = memparse(p, &p);
+
+ if (*p == '@')
+ crashk_res.start = memparse(p + 1, &p);
+ else
+ crashk_res.start = KDUMP_KERNELBASE;
+
+ crashk_res.end = crashk_res.start + size - 1;
+
+ return 0;
+}
+early_param("crashkernel", early_parse_crashk);
+
+void __init reserve_crashkernel(void)
+{
+ unsigned long size;
+
+ if (crashk_res.start == 0)
+ return;
+
+ /* We might have got these values via the command line or the
+ * device tree; either way, sanitise them now. */
+
+ size = crashk_res.end - crashk_res.start + 1;
+
+ if (crashk_res.start != KDUMP_KERNELBASE)
+ printk("Crash kernel location must be 0x%x\n",
+ KDUMP_KERNELBASE);
+
+ crashk_res.start = KDUMP_KERNELBASE;
+ size = PAGE_ALIGN(size);
+ crashk_res.end = crashk_res.start + size - 1;
+
+ /* Crash kernel trumps memory limit */
+ if (memory_limit && memory_limit <= crashk_res.end) {
+ memory_limit = crashk_res.end + 1;
+ printk("Adjusted memory limit for crashkernel, now 0x%lx\n",
+ memory_limit);
+ }
+
+ lmb_reserve(crashk_res.start, size);
+}
+
+int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+ return (start + size) > crashk_res.start && start <= crashk_res.end;
}
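Worked example for the new crashkernel handling, assuming KDUMP_KERNELBASE is the 32 MB base mentioned in the prom_init.c hunks later in this diff: booting with crashkernel=128M@32M makes early_parse_crashk() set crashk_res.start = 0x2000000 and crashk_res.end = 0x9ffffff; reserve_crashkernel() then page-aligns the size, warns and forces the base back to KDUMP_KERNELBASE if some other address was given, raises memory_limit if it would otherwise cut the region short, and finally calls lmb_reserve(0x2000000, 0x8000000) so the crash kernel's memory never reaches the page allocator.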
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index be982023409e..01d3916c4cb1 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -216,7 +216,7 @@ _GLOBAL(call_setup_cpu)
lwz r4,0(r4)
add r4,r4,r3
lwz r5,CPU_SPEC_SETUP(r4)
- cmpi 0,r5,0
+ cmpwi 0,r5,0
add r5,r5,r3
beqlr
mtctr r5
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 2778cce058e2..e8883d42c43c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -482,7 +482,9 @@ _GLOBAL(identify_cpu)
sub r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
+ cmpdi 0,r4,0
add r4,r4,r5
+ beqlr
ld r4,0(r4)
add r4,r4,r5
mtctr r4
@@ -768,9 +770,6 @@ _GLOBAL(giveup_altivec)
#endif /* CONFIG_ALTIVEC */
-_GLOBAL(__setup_cpu_power3)
- blr
-
_GLOBAL(execve)
li r0,__NR_execve
sc
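With the dummy __setup_cpu_power3 gone from cpu_specs, both call_setup_cpu (misc_32.S) and identify_cpu (misc_64.S) must tolerate a NULL cpu_setup pointer, which is what the cmpwi/cmpdi plus beqlr additions provide. A minimal C sketch of the guarded call the assembly now performs; the struct below mirrors only the one cpu_spec field that matters and everything else is illustrative:

struct cpu_spec_sketch {
        /* plays the role of cpu_spec::cpu_setup in the hunks above */
        void (*cpu_setup)(unsigned long offset, struct cpu_spec_sketch *spec);
};

static void call_setup_cpu_sketch(struct cpu_spec_sketch *spec,
                                  unsigned long offset)
{
        if (spec->cpu_setup)            /* beqlr: nothing to run for this CPU */
                spec->cpu_setup(offset, spec);
}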
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index ada50aa5b600..6960f090991e 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -204,7 +204,7 @@ static void nvram_print_partitions(char * label)
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
list_for_each(p, &nvram_part->partition) {
tmp_part = list_entry(p, struct nvram_partition, partition);
- printk(KERN_WARNING "%d \t%02x\t%02x\t%d\t%s\n",
+ printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index b129d2e4b759..c858eb4bef17 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -1113,9 +1113,10 @@ check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
int i;
int rc = 0;
-#define push_end(res, size) do { unsigned long __sz = (size) ; \
- res->end = ((res->end + __sz) / (__sz + 1)) * (__sz + 1) + __sz; \
- } while (0)
+#define push_end(res, mask) do { \
+ BUG_ON((mask+1) & mask); \
+ res->end = (res->end + mask) | mask; \
+} while (0)
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 class = dev->class >> 8;
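The rewritten push_end() now takes a mask of the form 2^n - 1 (enforced by the added BUG_ON) and computes (res->end + mask) | mask. For example, with res->end = 0x10234 and mask = 0xfff the result is 0x11fff, the same value the old divide-and-multiply arithmetic produced, but obtained without a division.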
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 4c4449be81ce..5ad87c426bed 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -42,14 +42,6 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
-/*
- * legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
- * devices we don't have access to.
- */
-unsigned long io_page_mask;
-
-EXPORT_SYMBOL(io_page_mask);
-
#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
@@ -235,8 +227,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
pci_setup_pci_controller(phb);
phb->arch_data = dev;
phb->is_dynamic = mem_init_done;
- if (dev)
+ if (dev) {
+ PHB_SET_NODE(phb, of_node_to_nid(dev));
add_linux_pci_domain(dev, phb);
+ }
return phb;
}
@@ -396,7 +390,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->current_state = 4; /* unknown power state */
- if (!strcmp(type, "pci")) {
+ if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
@@ -605,7 +599,7 @@ static int __init pcibios_init(void)
iSeries_pcibios_init();
#endif
- printk("PCI: Probing PCI hardware\n");
+ printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
@@ -630,14 +624,14 @@ static int __init pcibios_init(void)
/* Cache the location of the ISA bridge (if we have one) */
ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (ppc64_isabridge_dev != NULL)
- printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
+ printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
#ifdef CONFIG_PPC_MULTIPLATFORM
/* map in PCI I/O space */
phbs_remap_io();
#endif
- printk("PCI: Probing PCI hardware done\n");
+ printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
return 0;
}
@@ -804,7 +798,7 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
+ printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
prot);
return __pgprot(prot);
@@ -894,8 +888,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
-#ifdef CONFIG_PPC_MULTIPLATFORM
-static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pci_show_devspec(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
struct device_node *np;
@@ -907,13 +901,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
-#ifdef CONFIG_PPC_MULTIPLATFORM
device_create_file(&pdev->dev, &dev_attr_devspec);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
}
#ifdef CONFIG_PPC_MULTIPLATFORM
@@ -1104,8 +1095,6 @@ void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
hose->io_base_virt);
of_node_put(isa_dn);
- /* Allow all IO */
- io_page_mask = -1;
}
}
@@ -1212,7 +1201,7 @@ int remap_bus_range(struct pci_bus *bus)
return 1;
if (start_phys == 0)
return 1;
- printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
+ printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
if (__ioremap_explicit(start_phys, start_virt, size,
_PAGE_NO_CACHE | _PAGE_GUARDED))
return 1;
@@ -1232,27 +1221,13 @@ static void phbs_remap_io(void)
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long start, end, mask, offset;
+ unsigned long offset;
if (res->flags & IORESOURCE_IO) {
offset = (unsigned long)hose->io_base_virt - pci_io_base;
- start = res->start += offset;
- end = res->end += offset;
-
- /* Need to allow IO access to pages that are in the
- ISA range */
- if (start < MAX_ISA_PORT) {
- if (end > MAX_ISA_PORT)
- end = MAX_ISA_PORT;
-
- start >>= PAGE_SHIFT;
- end >>= PAGE_SHIFT;
-
- /* get the range of pages for the map */
- mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
- io_page_mask |= mask;
- }
+ res->start += offset;
+ res->end += offset;
} else if (res->flags & IORESOURCE_MEM) {
res->start += hose->pci_mem_offset;
res->end += hose->pci_mem_offset;
@@ -1442,3 +1417,12 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
return -EOPNOTSUPP;
}
+
+#ifdef CONFIG_NUMA
+int pcibus_to_node(struct pci_bus *bus)
+{
+ struct pci_controller *phb = pci_bus_to_host(bus);
+ return phb->node;
+}
+EXPORT_SYMBOL(pcibus_to_node);
+#endif
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
index e1a32f802c0b..72ce082ce738 100644
--- a/arch/powerpc/kernel/pci_direct_iommu.c
+++ b/arch/powerpc/kernel/pci_direct_iommu.c
@@ -82,13 +82,17 @@ static int pci_direct_dma_supported(struct device *dev, u64 mask)
return mask < 0x100000000ull;
}
+static struct dma_mapping_ops pci_direct_ops = {
+ .alloc_coherent = pci_direct_alloc_coherent,
+ .free_coherent = pci_direct_free_coherent,
+ .map_single = pci_direct_map_single,
+ .unmap_single = pci_direct_unmap_single,
+ .map_sg = pci_direct_map_sg,
+ .unmap_sg = pci_direct_unmap_sg,
+ .dma_supported = pci_direct_dma_supported,
+};
+
void __init pci_direct_iommu_init(void)
{
- pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
- pci_dma_ops.free_coherent = pci_direct_free_coherent;
- pci_dma_ops.map_single = pci_direct_map_single;
- pci_dma_ops.unmap_single = pci_direct_unmap_single;
- pci_dma_ops.map_sg = pci_direct_map_sg;
- pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
- pci_dma_ops.dma_supported = pci_direct_dma_supported;
+ pci_dma_ops = pci_direct_ops;
}
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 12c4c9e9bbc7..1c18953514c3 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -31,6 +31,7 @@
#include <asm/pci-bridge.h>
#include <asm/pSeries_reconfig.h>
#include <asm/ppc-pci.h>
+#include <asm/firmware.h>
/*
* Traverse_func that inits the PCI fields of the device node.
@@ -59,6 +60,11 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
pdn->busno = (regs[0] >> 16) & 0xff;
pdn->devfn = (regs[0] >> 8) & 0xff;
}
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ u32 *busp = (u32 *)get_property(dn, "linux,subbus", NULL);
+ if (busp)
+ pdn->bussubno = *busp;
+ }
pdn->pci_ext_config_space = (type && *type == 1);
return NULL;
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
index c1d95e14bbed..0688b2534acb 100644
--- a/arch/powerpc/kernel/pci_iommu.c
+++ b/arch/powerpc/kernel/pci_iommu.c
@@ -44,16 +44,16 @@
*/
#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-static inline struct iommu_table *devnode_table(struct device *dev)
+static inline struct iommu_table *device_to_table(struct device *hwdev)
{
struct pci_dev *pdev;
- if (!dev) {
+ if (!hwdev) {
pdev = ppc64_isabridge_dev;
if (!pdev)
return NULL;
} else
- pdev = to_pci_dev(dev);
+ pdev = to_pci_dev(hwdev);
return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
}
@@ -85,14 +85,15 @@ static inline unsigned long device_to_mask(struct device *hwdev)
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
- device_to_mask(hwdev), flag);
+ return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
+ device_to_mask(hwdev), flag,
+ pcibus_to_node(to_pci_dev(hwdev)->bus));
}
static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
+ iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -104,7 +105,7 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
- return iommu_map_single(devnode_table(hwdev), vaddr, size,
+ return iommu_map_single(device_to_table(hwdev), vaddr, size,
device_to_mask(hwdev), direction);
}
@@ -112,27 +113,27 @@ static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
- iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
+ iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
}
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- return iommu_map_sg(pdev, devnode_table(pdev), sglist,
+ return iommu_map_sg(pdev, device_to_table(pdev), sglist,
nelems, device_to_mask(pdev), direction);
}
static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
+ iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct device *dev, u64 mask)
{
- struct iommu_table *tbl = devnode_table(dev);
+ struct iommu_table *tbl = device_to_table(dev);
if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
@@ -147,13 +148,17 @@ static int pci_iommu_dma_supported(struct device *dev, u64 mask)
return 1;
}
+struct dma_mapping_ops pci_iommu_ops = {
+ .alloc_coherent = pci_iommu_alloc_coherent,
+ .free_coherent = pci_iommu_free_coherent,
+ .map_single = pci_iommu_map_single,
+ .unmap_single = pci_iommu_unmap_single,
+ .map_sg = pci_iommu_map_sg,
+ .unmap_sg = pci_iommu_unmap_sg,
+ .dma_supported = pci_iommu_dma_supported,
+};
+
void pci_iommu_init(void)
{
- pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
- pci_dma_ops.free_coherent = pci_iommu_free_coherent;
- pci_dma_ops.map_single = pci_iommu_map_single;
- pci_dma_ops.unmap_single = pci_iommu_unmap_single;
- pci_dma_ops.map_sg = pci_iommu_map_sg;
- pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
- pci_dma_ops.dma_supported = pci_iommu_dma_supported;
+ pci_dma_ops = pci_iommu_ops;
}
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index 3c2cf661f6d9..2b87f82df135 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -52,7 +52,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
- if (!machine_is(pseries) && !machine_is(cell))
+ if (!of_find_node_by_path("/rtas"))
return 0;
if (!proc_mkdir("rtas", root))
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2dd47d2dd998..e4732459c485 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -708,6 +708,61 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
return put_user(val, (unsigned int __user *) adr);
}
+int set_endian(struct task_struct *tsk, unsigned int val)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+
+ if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
+ (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (val == PR_ENDIAN_BIG)
+ regs->msr &= ~MSR_LE;
+ else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
+ regs->msr |= MSR_LE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int get_endian(struct task_struct *tsk, unsigned long adr)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned int val;
+
+ if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
+ !cpu_has_feature(CPU_FTR_REAL_LE))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (regs->msr & MSR_LE) {
+ if (cpu_has_feature(CPU_FTR_REAL_LE))
+ val = PR_ENDIAN_LITTLE;
+ else
+ val = PR_ENDIAN_PPC_LITTLE;
+ } else
+ val = PR_ENDIAN_BIG;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+ tsk->thread.align_ctl = val;
+ return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+ return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
+}
+
#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
int sys_clone(unsigned long clone_flags, unsigned long usp,
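set_endian()/get_endian() and set_unalign_ctl()/get_unalign_ctl() are the arch backends for the PR_SET_ENDIAN/PR_GET_ENDIAN and PR_SET_UNALIGN/PR_GET_UNALIGN prctl() operations, gated on the CPU_FTR_REAL_LE and CPU_FTR_PPC_LE feature bits (the latter matching the PPC_FEATURE_PPC_LE user flag added in the cputable hunks). A hedged userspace sketch, assuming the PR_* constants are visible through <sys/prctl.h> on the target system:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        unsigned int mode = 0;

        /* An EINVAL result means the CPU reports neither CPU_FTR_PPC_LE
         * nor CPU_FTR_REAL_LE, per get_endian() above. */
        if (prctl(PR_GET_ENDIAN, &mode) == 0)
                printf("endian mode: %u (PR_ENDIAN_BIG == %d)\n",
                       mode, PR_ENDIAN_BIG);
        else
                perror("PR_GET_ENDIAN");

        return 0;
}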
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9a07f97f0712..483455c5bb02 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -50,6 +50,7 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
+#include <asm/kexec.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
@@ -836,6 +837,42 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
return mem;
}
+static int __init early_parse_mem(char *p)
+{
+ if (!p)
+ return 1;
+
+ memory_limit = PAGE_ALIGN(memparse(p, &p));
+ DBG("memory limit = 0x%lx\n", memory_limit);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+/*
+ * The device tree may be allocated beyond our memory limit, or inside the
+ * crash kernel region for kdump. If so, move it out now.
+ */
+static void move_device_tree(void)
+{
+ unsigned long start, size;
+ void *p;
+
+ DBG("-> move_device_tree\n");
+
+ start = __pa(initial_boot_params);
+ size = initial_boot_params->totalsize;
+
+ if ((memory_limit && (start + size) > memory_limit) ||
+ overlaps_crashkernel(start, size)) {
+ p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+ memcpy(p, initial_boot_params, size);
+ initial_boot_params = (struct boot_param_header *)p;
+ DBG("Moved device tree to 0x%p\n", p);
+ }
+
+ DBG("<- move_device_tree\n");
+}
/**
* unflattens the device-tree passed by the firmware, creating the
@@ -911,7 +948,10 @@ static struct ibm_pa_feature {
{CPU_FTR_CTRL, 0, 0, 3, 0},
{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
+#if 0
+ /* put this back once we know how to test if firmware does 64k IO */
{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+#endif
};
static void __init check_cpu_pa_features(unsigned long node)
@@ -1070,6 +1110,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
iommu_force_on = 1;
#endif
+ /* mem=x on the command line is the preferred mechanism */
lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop)
memory_limit = *lprop;
@@ -1123,17 +1164,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
DBG("Command line is: %s\n", cmd_line);
- if (strstr(cmd_line, "mem=")) {
- char *p, *q;
-
- for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
- q = p + 4;
- if (p > cmd_line && p[-1] != ' ')
- continue;
- memory_limit = memparse(q, &q);
- }
- }
-
/* break now */
return 1;
}
@@ -1237,9 +1267,17 @@ static void __init early_reserve_mem(void)
{
u64 base, size;
u64 *reserve_map;
+ unsigned long self_base;
+ unsigned long self_size;
reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
initial_boot_params->off_mem_rsvmap);
+
+ /* before we do anything, let's reserve the dt blob */
+ self_base = __pa((unsigned long)initial_boot_params);
+ self_size = initial_boot_params->totalsize;
+ lmb_reserve(self_base, self_size);
+
#ifdef CONFIG_PPC32
/*
* Handle the case where we might be booting from an old kexec
@@ -1254,6 +1292,9 @@ static void __init early_reserve_mem(void)
size_32 = *(reserve_map_32++);
if (size_32 == 0)
break;
+ /* skip if the reservation is for the blob */
+ if (base_32 == self_base && size_32 == self_size)
+ continue;
DBG("reserving: %x -> %x\n", base_32, size_32);
lmb_reserve(base_32, size_32);
}
@@ -1265,6 +1306,9 @@ static void __init early_reserve_mem(void)
size = *(reserve_map++);
if (size == 0)
break;
+ /* skip if the reservation is for the blob */
+ if (base == self_base && size == self_size)
+ continue;
DBG("reserving: %llx -> %llx\n", base, size);
lmb_reserve(base, size);
}
@@ -1292,18 +1336,26 @@ void __init early_init_devtree(void *params)
lmb_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
- lmb_enforce_memory_limit(memory_limit);
- lmb_analyze();
- DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+ /* Save command line for /proc/cmdline and then parse parameters */
+ strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
+ parse_early_param();
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
-#ifdef CONFIG_CRASH_DUMP
- lmb_reserve(0, KDUMP_RESERVE_LIMIT);
-#endif
+ reserve_kdump_trampoline();
+ reserve_crashkernel();
early_reserve_mem();
+ lmb_enforce_memory_limit(memory_limit);
+ lmb_analyze();
+
+ DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+
+ /* We may need to relocate the flat tree; do it now.
+ * FIXME .. and the initrd too? */
+ move_device_tree();
+
DBG("Scanning CPUs ...\n");
/* Retrieve CPU related information from the flat tree
@@ -2053,29 +2105,46 @@ int prom_update_property(struct device_node *np,
return 0;
}
-#ifdef CONFIG_KEXEC
-/* We may have allocated the flat device tree inside the crash kernel region
- * in prom_init. If so we need to move it out into regular memory. */
-void kdump_move_device_tree(void)
-{
- unsigned long start, end;
- struct boot_param_header *new;
-
- start = __pa((unsigned long)initial_boot_params);
- end = start + initial_boot_params->totalsize;
-
- if (end < crashk_res.start || start > crashk_res.end)
- return;
- new = (struct boot_param_header*)
- __va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE));
-
- memcpy(new, initial_boot_params, initial_boot_params->totalsize);
+/* Find the device node for a given logical cpu number; also returns the
+ * cpu-local thread number (index in ibm,ppc-interrupt-server#s) if relevant
+ * and asked for (non-NULL).
+ */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+{
+ int hardid;
+ struct device_node *np;
- initial_boot_params = new;
+ hardid = get_hard_smp_processor_id(cpu);
- DBG("Flat device tree blob moved to %p\n", initial_boot_params);
+ for_each_node_by_type(np, "cpu") {
+ u32 *intserv;
+ unsigned int plen, t;
- /* XXX should we unreserve the old DT? */
+ /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
+ * fall back to the "reg" property and assume no threads
+ */
+ intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s",
+ &plen);
+ if (intserv == NULL) {
+ u32 *reg = (u32 *)get_property(np, "reg", NULL);
+ if (reg == NULL)
+ continue;
+ if (*reg == hardid) {
+ if (thread)
+ *thread = 0;
+ return np;
+ }
+ } else {
+ plen /= sizeof(u32);
+ for (t = 0; t < plen; t++) {
+ if (hardid == intserv[t]) {
+ if (thread)
+ *thread = t;
+ return np;
+ }
+ }
+ }
+ }
+ return NULL;
}
-#endif /* CONFIG_KEXEC */
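The old kdump_move_device_tree() logic is folded into move_device_tree() earlier in this file, and the new of_get_cpu_node() gives callers one way to map a logical CPU number to its device-tree node and thread slot. A minimal caller sketch, assuming the caller drops the node reference when done; the printing is purely illustrative:

#include <linux/kernel.h>
#include <asm/prom.h>

/* Sketch: report the device-tree node and thread index for one CPU. */
static void sketch_show_cpu_node(int cpu)
{
        unsigned int thread = 0;
        struct device_node *np = of_get_cpu_node(cpu, &thread);

        if (np) {
                printk(KERN_DEBUG "cpu %d -> %s, thread %u\n",
                       cpu, np->full_name, thread);
                of_node_put(np);
        }
}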
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index f70bd090dacd..8c28eb0cbdac 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -194,19 +194,12 @@ static int __initdata of_platform;
static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
-static unsigned long __initdata prom_memory_limit;
-
static unsigned long __initdata alloc_top;
static unsigned long __initdata alloc_top_high;
static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;
-#ifdef CONFIG_KEXEC
-static unsigned long __initdata prom_crashk_base;
-static unsigned long __initdata prom_crashk_size;
-#endif
-
static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;
@@ -574,7 +567,7 @@ static void __init early_cmdline_parse(void)
if ((long)_prom->chosen > 0)
l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
- if (l == 0) /* dbl check */
+ if (l <= 0 || p[0] == '\0') /* dbl check */
strlcpy(RELOC(prom_cmd_line),
RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
@@ -593,45 +586,6 @@ static void __init early_cmdline_parse(void)
RELOC(iommu_force_on) = 1;
}
#endif
-
- opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
- if (opt) {
- opt += 4;
- RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
-#ifdef CONFIG_PPC64
- /* Align to 16 MB == size of ppc64 large page */
- RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
-#endif
- }
-
-#ifdef CONFIG_KEXEC
- /*
- * crashkernel=size@addr specifies the location to reserve for
- * crash kernel.
- */
- opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
- if (opt) {
- opt += 12;
- RELOC(prom_crashk_size) =
- prom_memparse(opt, (const char **)&opt);
-
- if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
- RELOC(prom_crashk_size)) {
- prom_printf("Warning: crashkernel size is not "
- "aligned to 16MB\n");
- }
-
- /*
- * At present, the crash kernel always run at 32MB.
- * Just ignore whatever user passed.
- */
- RELOC(prom_crashk_base) = 0x2000000;
- if (*opt == '@') {
- prom_printf("Warning: PPC64 kdump kernel always runs "
- "at 32 MB\n");
- }
- }
-#endif
}
#ifdef CONFIG_PPC_PSERIES
@@ -1116,29 +1070,6 @@ static void __init prom_init_mem(void)
}
/*
- * If prom_memory_limit is set we reduce the upper limits *except* for
- * alloc_top_high. This must be the real top of RAM so we can put
- * TCE's up there.
- */
-
- RELOC(alloc_top_high) = RELOC(ram_top);
-
- if (RELOC(prom_memory_limit)) {
- if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
- prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
- } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
- prom_printf("Ignoring mem=%x >= ram_top.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
- } else {
- RELOC(ram_top) = RELOC(prom_memory_limit);
- RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
- }
- }
-
- /*
* Setup our top alloc point, that is top of RMO or top of
* segment 0 when running non-LPAR.
* Some RS64 machines have buggy firmware where claims up at
@@ -1150,20 +1081,14 @@ static void __init prom_init_mem(void)
RELOC(rmo_top) = RELOC(ram_top);
RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
RELOC(alloc_top) = RELOC(rmo_top);
+ RELOC(alloc_top_high) = RELOC(ram_top);
prom_printf("memory layout at init:\n");
- prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
prom_printf(" ram_top : %x\n", RELOC(ram_top));
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base)) {
- prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base));
- prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size));
- }
-#endif
}
@@ -1349,16 +1274,10 @@ static void __init prom_initialize_tce_table(void)
reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
- if (RELOC(prom_memory_limit)) {
- /*
- * We align the start to a 16MB boundary so we can map
- * the TCE area using large pages if possible.
- * The end should be the top of RAM so no need to align it.
- */
- RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
- 0x1000000);
- RELOC(prom_tce_alloc_end) = local_alloc_top;
- }
+ /* These are only really needed if there is a memory limit in
+ * effect, but we don't know, so export them always. */
+ RELOC(prom_tce_alloc_start) = local_alloc_bottom;
+ RELOC(prom_tce_alloc_end) = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
@@ -2041,11 +1960,7 @@ static void __init flatten_device_tree(void)
/* Version 16 is not backward compatible */
hdr->last_comp_version = 0x10;
- /* Reserve the whole thing and copy the reserve map in, we
- * also bump mem_reserve_cnt to cause further reservations to
- * fail since it's too late.
- */
- reserve_mem(RELOC(dt_header_start), hdr->totalsize);
+ /* Copy the reserve map in */
memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
#ifdef DEBUG_PROM
@@ -2058,6 +1973,9 @@ static void __init flatten_device_tree(void)
RELOC(mem_reserve_map)[i].size);
}
#endif
+ /* Bump mem_reserve_cnt to cause further reservations to fail
+ * since it's too late.
+ */
RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%x -> 0x%x\n",
@@ -2280,10 +2198,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_mem();
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base))
- reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));
-#endif
/*
* Determine which cpu is actually running right _now_
*/
@@ -2317,10 +2231,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* Fill in some infos for use by the kernel later on
*/
- if (RELOC(prom_memory_limit))
- prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
- &RELOC(prom_memory_limit),
- sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
if (RELOC(ppc64_iommu_off))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
@@ -2340,16 +2250,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
}
#endif
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base)) {
- prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
- PTRRELOC(&prom_crashk_base),
- sizeof(RELOC(prom_crashk_base)));
- prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
- PTRRELOC(&prom_crashk_size),
- sizeof(RELOC(prom_crashk_size)));
- }
-#endif
/*
* Fixup any known bugs in the device-tree
*/
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 3934c227549b..45df420383cc 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -548,3 +548,28 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
return __of_address_to_resource(dev, addrp, size, flags, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size)
+{
+ u32 *dma_window, cells;
+ unsigned char *prop;
+
+ dma_window = (u32 *)dma_window_prop;
+
+ /* busno is always one cell */
+ *busno = *(dma_window++);
+
+ prop = get_property(dn, "ibm,#dma-address-cells", NULL);
+ if (!prop)
+ prop = get_property(dn, "#address-cells", NULL);
+
+ cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn);
+ *phys = of_read_addr(dma_window, cells);
+
+ dma_window += cells;
+
+ prop = get_property(dn, "ibm,#dma-size-cells", NULL);
+ cells = prop ? *(u32 *)prop : prom_n_size_cells(dn);
+ *size = of_read_addr(dma_window, cells);
+}
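of_parse_dma_window() decodes a bus number followed by an address/size pair whose cell counts come from ibm,#dma-address-cells and ibm,#dma-size-cells, falling back to the node's ordinary cell properties. A hedged caller sketch; the "ibm,dma-window" property name is the conventional one but is an assumption here, not something this hunk establishes:

#include <asm/prom.h>

/* Sketch: pull the DMA window out of a bridge node, if it has one. */
static void sketch_read_dma_window(struct device_node *dn)
{
        unsigned long busno, phys, size;
        unsigned char *dma_window;

        dma_window = get_property(dn, "ibm,dma-window", NULL);
        if (!dma_window)
                return;

        of_parse_dma_window(dn, dma_window, &busno, &phys, &size);
        /* busno, phys and size now describe the window for this bus */
}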
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 4a677d1bd4ef..5563e2e7d89c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -404,7 +404,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = ptrace_detach(child, data);
break;
-#ifdef CONFIG_PPC64
case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
int i;
unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
@@ -468,7 +467,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
break;
}
-#endif /* CONFIG_PPC64 */
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 34d073fb6091..77578c093dda 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -14,19 +14,20 @@
unsigned long __init rtas_get_boot_time(void)
{
int ret[8];
- int error, wait_time;
+ int error;
+ unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
- wait_time = rtas_extended_busy_delay_time(error);
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
/* This is boot time so we spin. */
udelay(wait_time*1000);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
@@ -44,24 +45,25 @@ unsigned long __init rtas_get_boot_time(void)
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
- int error, wait_time;
+ int error;
+ unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
if (in_interrupt() && printk_ratelimit()) {
memset(rtc_tm, 0, sizeof(struct rtc_time));
printk(KERN_WARNING "error: reading clock"
" would delay interrupt\n");
return; /* delay not allowed */
}
- wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
@@ -88,14 +90,14 @@ int rtas_set_rtc_time(struct rtc_time *tm)
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
if (in_interrupt())
return 1; /* probably decrementer */
- wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit())
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 0112318213ab..17dc79198515 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -370,24 +370,36 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
return ret;
}
-/* Given an RTAS status code of 990n compute the hinted delay of 10^n
- * (last digit) milliseconds. For now we bound at n=5 (100 sec).
+/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
+ * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
*/
-unsigned int rtas_extended_busy_delay_time(int status)
+unsigned int rtas_busy_delay_time(int status)
{
- int order = status - 9900;
- unsigned long ms;
+ int order;
+ unsigned int ms = 0;
+
+ if (status == RTAS_BUSY) {
+ ms = 1;
+ } else if (status >= 9900 && status <= 9905) {
+ order = status - 9900;
+ for (ms = 1; order > 0; order--)
+ ms *= 10;
+ }
- if (order < 0)
- order = 0; /* RTC depends on this for -2 clock busy */
- else if (order > 5)
- order = 5; /* bound */
+ return ms;
+}
- /* Use microseconds for reasonable accuracy */
- for (ms = 1; order > 0; order--)
- ms *= 10;
+/* For an RTAS busy status code, perform the hinted delay. */
+unsigned int rtas_busy_delay(int status)
+{
+ unsigned int ms;
- return ms;
+ might_sleep();
+ ms = rtas_busy_delay_time(status);
+ if (ms)
+ msleep(ms);
+
+ return ms;
}
int rtas_error_rc(int rtas_rc)
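Note: the new helper gives callers the simple pattern do { rc = rtas_call(...); } while (rtas_busy_delay(rc));. A standalone sketch of the status-to-delay mapping above, for a quick sanity check: RTAS_BUSY (-2) yields 1 ms, 990n yields 10^n ms (9900 gives 1 ms, 9903 gives 1000 ms, 9905 gives 100000 ms), and any other status yields 0, which ends the retry loop.

/* Standalone sketch mirroring rtas_busy_delay_time(); not kernel code. */
#include <stdio.h>

#define RTAS_BUSY (-2)

static unsigned int busy_delay_time(int status)
{
	unsigned int ms = 0;
	int order;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= 9900 && status <= 9905) {
		order = status - 9900;
		for (ms = 1; order > 0; order--)
			ms *= 10;
	}
	return ms;
}

int main(void)
{
	int samples[] = { RTAS_BUSY, 9900, 9903, 9905, 0, -1 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("status %5d -> %u ms\n", samples[i],
		       busy_delay_time(samples[i]));
	return 0;
}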
@@ -438,22 +450,14 @@ int rtas_get_power_level(int powerdomain, int *level)
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
int token = rtas_token("set-power-level");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -463,22 +467,14 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel)
int rtas_get_sensor(int sensor, int index, int *state)
{
int token = rtas_token("get-sensor-state");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 2, 2, state, sensor, index);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -488,23 +484,14 @@ int rtas_get_sensor(int sensor, int index, int *state)
int rtas_set_indicator(int indicator, int index, int new_value)
{
int token = rtas_token("set-indicator");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- }
- else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -555,13 +542,11 @@ void rtas_os_term(char *str)
do {
status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
__pa(rtas_os_term_buf));
+ } while (rtas_busy_delay(status));
- if (status == RTAS_BUSY)
- udelay(1);
- else if (status != 0)
- printk(KERN_EMERG "ibm,os-term call failed %d\n",
+ if (status != 0)
+ printk(KERN_EMERG "ibm,os-term call failed %d\n",
status);
- } while (status == RTAS_BUSY);
}
static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
@@ -608,9 +593,31 @@ out:
static int rtas_ibm_suspend_me(struct rtas_args *args)
{
int i;
+ long state;
+ long rc;
+ unsigned long dummy;
struct rtas_suspend_me_data data;
+ /* Make sure the state is valid */
+ rc = plpar_hcall(H_VASI_STATE,
+ ((u64)args->args[0] << 32) | args->args[1],
+ 0, 0, 0,
+ &state, &dummy, &dummy);
+
+ if (rc) {
+ printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
+ return rc;
+ } else if (state == H_VASI_ENABLED) {
+ args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
+ return 0;
+ } else if (state != H_VASI_SUSPENDING) {
+ printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
+ state);
+ args->args[args->nargs] = -1;
+ return 0;
+ }
+
data.waiting = 1;
data.args = args;
@@ -789,7 +796,8 @@ EXPORT_SYMBOL(rtas_token);
EXPORT_SYMBOL(rtas_call);
EXPORT_SYMBOL(rtas_data_buf);
EXPORT_SYMBOL(rtas_data_buf_lock);
-EXPORT_SYMBOL(rtas_extended_busy_delay_time);
+EXPORT_SYMBOL(rtas_busy_delay_time);
+EXPORT_SYMBOL(rtas_busy_delay);
EXPORT_SYMBOL(rtas_get_sensor);
EXPORT_SYMBOL(rtas_get_power_level);
EXPORT_SYMBOL(rtas_set_power_level);
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index aaf384c3f04a..1442b63a75da 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -365,20 +365,12 @@ static int rtas_excl_release(struct inode *inode, struct file *file)
static void manage_flash(struct rtas_manage_flash_t *args_buf)
{
- unsigned int wait_time;
s32 rc;
- while (1) {
+ do {
rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1,
1, NULL, args_buf->op);
- if (rc == RTAS_RC_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
args_buf->status = rc;
}
@@ -451,27 +443,18 @@ static ssize_t manage_flash_write(struct file *file, const char __user *buf,
static void validate_flash(struct rtas_validate_flash_t *args_buf)
{
int token = rtas_token("ibm,validate-flash-image");
- unsigned int wait_time;
int update_results;
s32 rc;
rc = 0;
- while(1) {
+ do {
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE);
rc = rtas_call(token, 2, 2, &update_results,
(u32) __pa(rtas_data_buf), args_buf->buf_size);
memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
-
- if (rc == RTAS_RC_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
args_buf->status = rc;
args_buf->update_results = update_results;
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 57b539a03fa9..6eb7e49b394a 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -313,7 +313,9 @@ unsigned long __init find_and_init_phbs(void)
for (node = of_get_next_child(root, NULL);
node != NULL;
node = of_get_next_child(root, node)) {
- if (node->type == NULL || strcmp(node->type, "pci") != 0)
+
+ if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
+ strcmp(node->type, "pciex") != 0))
continue;
phb = pcibios_alloc_controller(node);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 684ab1d49c65..bd328123af75 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -443,6 +443,7 @@ void __init smp_setup_cpu_maps(void)
}
#endif /* CONFIG_SMP */
+int __initdata do_early_xmon;
#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
@@ -456,7 +457,7 @@ static int __init early_xmon(char *p)
return 0;
}
xmon_init(1);
- debugger(NULL);
+ do_early_xmon = 1;
return 0;
}
@@ -524,3 +525,20 @@ int check_legacy_ioport(unsigned long base_port)
return ppc_md.check_legacy_ioport(base_port);
}
EXPORT_SYMBOL(check_legacy_ioport);
+
+static int ppc_panic_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ ppc_md.panic(ptr); /* May not return */
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_panic_block = {
+ .notifier_call = ppc_panic_event,
+ .priority = INT_MIN /* may not return; must be done last */
+};
+
+void __init setup_panic(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+}
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 2ebba755272e..4c67ad7fae08 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -2,5 +2,8 @@
#define _POWERPC_KERNEL_SETUP_H
void check_for_initrd(void);
+void do_init_bootmem(void);
+void setup_panic(void);
+extern int do_early_xmon;
#endif /* _POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 69ac25701344..e5a44812441a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -131,12 +131,6 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
- /* Check default command line */
-#ifdef CONFIG_CMDLINE
- if (cmd_line[0] == 0)
- strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
-#endif /* CONFIG_CMDLINE */
-
probe_machine();
#ifdef CONFIG_6xx
@@ -235,7 +229,7 @@ arch_initcall(ppc_init);
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
- extern void do_init_bootmem(void);
+ *cmdline_p = cmd_line;
/* so udelay does something sensible, assume <= 1000 bogomips */
loops_per_jiffy = 500000000 / HZ;
@@ -285,16 +279,16 @@ void __init setup_arch(char **cmdline_p)
/* reboot on panic */
panic_timeout = 180;
+ if (ppc_md.panic)
+ setup_panic();
+
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
- *cmdline_p = cmd_line;
-
- parse_early_param();
+ if (do_early_xmon)
+ debugger(NULL);
/* set up the bootmem stuff with available memory */
do_init_bootmem();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4467c49903b6..78f3a5fd43f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -100,12 +100,6 @@ unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */
-static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
-static struct notifier_block ppc64_panic_block = {
- .notifier_call = ppc64_panic_event,
- .priority = INT_MIN /* may not return; must be done last */
-};
-
#ifdef CONFIG_SMP
static int smt_enabled_cmdline;
@@ -199,9 +193,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Probe the machine type */
probe_machine();
-#ifdef CONFIG_CRASH_DUMP
- kdump_setup();
-#endif
+ setup_kdump_trampoline();
DBG("Found, Initializing memory management...\n");
@@ -353,9 +345,6 @@ void __init setup_system(void)
{
DBG(" -> setup_system()\n");
-#ifdef CONFIG_KEXEC
- kdump_move_device_tree();
-#endif
/*
* Unflatten the device-tree passed by prom_init or kexec
*/
@@ -420,10 +409,8 @@ void __init setup_system(void)
*/
register_early_udbg_console();
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
-
- parse_early_param();
+ if (do_early_xmon)
+ debugger(NULL);
check_smt_enabled();
smp_setup_cpu_maps();
@@ -456,13 +443,6 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
-static int ppc64_panic_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- ppc_md.panic((char *)ptr); /* May not return */
- return NOTIFY_DONE;
-}
-
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
@@ -517,8 +497,6 @@ static void __init emergency_stack_init(void)
*/
void __init setup_arch(char **cmdline_p)
{
- extern void do_init_bootmem(void);
-
ppc64_boot_msg(0x12, "Setup Arch");
*cmdline_p = cmd_line;
@@ -535,8 +513,7 @@ void __init setup_arch(char **cmdline_p)
panic_timeout = 180;
if (ppc_md.panic)
- atomic_notifier_chain_register(&panic_notifier_list,
- &ppc64_panic_block);
+ setup_panic();
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 8fdeca2d4597..d73b25e22fca 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -419,9 +419,7 @@ static long restore_user_regs(struct pt_regs *regs,
{
long err;
unsigned int save_r2 = 0;
-#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
unsigned long msr;
-#endif
/*
* restore general registers but not including MSR or SOFTE. Also
@@ -430,11 +428,16 @@ static long restore_user_regs(struct pt_regs *regs,
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
+ err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
if (err)
return 1;
+ /* if doing signal return, restore the previous little-endian mode */
+ if (sig)
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
/*
* Do this before updating the thread state in
* current->thread.fpr/vr/evr. That way, if we get preempted
@@ -455,7 +458,7 @@ static long restore_user_regs(struct pt_regs *regs,
/* force the process to reload the altivec registers from
current->thread when it next does altivec instructions */
regs->msr &= ~MSR_VEC;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
+ if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
sizeof(sr->mc_vregs)))
@@ -472,7 +475,7 @@ static long restore_user_regs(struct pt_regs *regs,
/* force the process to reload the spe registers from
current->thread when it next does spe instructions */
regs->msr &= ~MSR_SPE;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
+ if (msr & MSR_SPE) {
/* restore spe registers from the stack */
if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
ELF_NEVRREG * sizeof(u32)))
@@ -757,10 +760,10 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
- if (vdso32_rt_sigtramp && current->thread.vdso_base) {
+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, frame, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
if (save_user_regs(regs, frame, __NR_rt_sigreturn))
goto badframe;
@@ -777,6 +780,8 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[5] = (unsigned long) &rt_sf->uc;
regs->gpr[6] = (unsigned long) rt_sf;
regs->nip = (unsigned long) ka->sa.sa_handler;
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;
@@ -1038,10 +1043,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
|| __put_user(sig, &sc->signal))
goto badframe;
- if (vdso32_sigtramp && current->thread.vdso_base) {
+ if (vdso32_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, &frame->mctx, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
} else {
if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
goto badframe;
@@ -1056,6 +1061,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[3] = sig;
regs->gpr[4] = (unsigned long) sc;
regs->nip = (unsigned long) ka->sa.sa_handler;
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c2db642f4cdd..6e75d7ab6d4d 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -141,9 +141,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
unsigned long err = 0;
unsigned long save_r13 = 0;
elf_greg_t *gregs = (elf_greg_t *)regs;
-#ifdef CONFIG_ALTIVEC
unsigned long msr;
-#endif
int i;
/* If this is not a signal return, we preserve the TLS in r13 */
@@ -154,7 +152,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __copy_from_user(regs, &sc->gp_regs,
PT_MSR*sizeof(unsigned long));
- /* skip MSR and SOFTE */
+ /* get MSR separately, transfer the LE bit if doing signal return */
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ if (sig)
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+ /* skip SOFTE */
for (i = PT_MSR+1; i <= PT_RESULT; i++) {
if (i == PT_SOFTE)
continue;
@@ -179,7 +182,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
- err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
if (err)
return err;
if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
@@ -396,8 +398,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
current->thread.fpscr.val = 0;
/* Set up to return from userspace. */
- if (vdso64_rt_sigtramp && current->thread.vdso_base) {
- regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
@@ -412,6 +414,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
/* Set up "regs" so we "return" to the signal handler. */
err |= get_user(regs->nip, &funct_desc_ptr->entry);
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->gpr[1] = newsp;
err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
regs->gpr[3] = signr;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 530f7dba0bd2..c5d179d4f818 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -492,7 +492,7 @@ int __devinit __cpu_up(unsigned int cpu)
* -- Cort
*/
if (system_state < SYSTEM_RUNNING)
- for (c = 5000; c && !cpu_callin_map[cpu]; c--)
+ for (c = 50000; c && !cpu_callin_map[cpu]; c--)
udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
else
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 26ed1f5ef16e..ee75ccf1a802 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -32,6 +32,10 @@
#define SYS32ONLY(func) .long sys_##func
#define SYSX(f, f3264, f32) .long f32
#endif
+#define SYSCALL_SPU(func) SYSCALL(func)
+#define COMPAT_SYS_SPU(func) COMPAT_SYS(func)
+#define PPC_SYS_SPU(func) PPC_SYS(func)
+#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
#ifdef CONFIG_PPC64
#define sys_sigpending sys_ni_syscall
@@ -39,309 +43,4 @@
#endif
_GLOBAL(sys_call_table)
-SYSCALL(restart_syscall)
-SYSCALL(exit)
-PPC_SYS(fork)
-SYSCALL(read)
-SYSCALL(write)
-COMPAT_SYS(open)
-SYSCALL(close)
-COMPAT_SYS(waitpid)
-COMPAT_SYS(creat)
-SYSCALL(link)
-SYSCALL(unlink)
-COMPAT_SYS(execve)
-SYSCALL(chdir)
-COMPAT_SYS(time)
-SYSCALL(mknod)
-SYSCALL(chmod)
-SYSCALL(lchown)
-SYSCALL(ni_syscall)
-OLDSYS(stat)
-SYSX(sys_lseek,ppc32_lseek,sys_lseek)
-SYSCALL(getpid)
-COMPAT_SYS(mount)
-SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
-SYSCALL(setuid)
-SYSCALL(getuid)
-COMPAT_SYS(stime)
-COMPAT_SYS(ptrace)
-SYSCALL(alarm)
-OLDSYS(fstat)
-COMPAT_SYS(pause)
-COMPAT_SYS(utime)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-COMPAT_SYS(access)
-COMPAT_SYS(nice)
-SYSCALL(ni_syscall)
-SYSCALL(sync)
-COMPAT_SYS(kill)
-SYSCALL(rename)
-COMPAT_SYS(mkdir)
-SYSCALL(rmdir)
-SYSCALL(dup)
-SYSCALL(pipe)
-COMPAT_SYS(times)
-SYSCALL(ni_syscall)
-SYSCALL(brk)
-SYSCALL(setgid)
-SYSCALL(getgid)
-SYSCALL(signal)
-SYSCALL(geteuid)
-SYSCALL(getegid)
-SYSCALL(acct)
-SYSCALL(umount)
-SYSCALL(ni_syscall)
-COMPAT_SYS(ioctl)
-COMPAT_SYS(fcntl)
-SYSCALL(ni_syscall)
-COMPAT_SYS(setpgid)
-SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
-COMPAT_SYS(umask)
-SYSCALL(chroot)
-SYSCALL(ustat)
-SYSCALL(dup2)
-SYSCALL(getppid)
-SYSCALL(getpgrp)
-SYSCALL(setsid)
-SYS32ONLY(sigaction)
-SYSCALL(sgetmask)
-COMPAT_SYS(ssetmask)
-SYSCALL(setreuid)
-SYSCALL(setregid)
-SYS32ONLY(sigsuspend)
-COMPAT_SYS(sigpending)
-COMPAT_SYS(sethostname)
-COMPAT_SYS(setrlimit)
-COMPAT_SYS(old_getrlimit)
-COMPAT_SYS(getrusage)
-COMPAT_SYS(gettimeofday)
-COMPAT_SYS(settimeofday)
-COMPAT_SYS(getgroups)
-COMPAT_SYS(setgroups)
-SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
-SYSCALL(symlink)
-OLDSYS(lstat)
-COMPAT_SYS(readlink)
-SYSCALL(uselib)
-SYSCALL(swapon)
-SYSCALL(reboot)
-SYSX(sys_ni_syscall,old32_readdir,old_readdir)
-SYSCALL(mmap)
-SYSCALL(munmap)
-SYSCALL(truncate)
-SYSCALL(ftruncate)
-SYSCALL(fchmod)
-SYSCALL(fchown)
-COMPAT_SYS(getpriority)
-COMPAT_SYS(setpriority)
-SYSCALL(ni_syscall)
-COMPAT_SYS(statfs)
-COMPAT_SYS(fstatfs)
-SYSCALL(ni_syscall)
-COMPAT_SYS(socketcall)
-COMPAT_SYS(syslog)
-COMPAT_SYS(setitimer)
-COMPAT_SYS(getitimer)
-COMPAT_SYS(newstat)
-COMPAT_SYS(newlstat)
-COMPAT_SYS(newfstat)
-SYSX(sys_ni_syscall,sys_uname,sys_uname)
-SYSCALL(ni_syscall)
-SYSCALL(vhangup)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-COMPAT_SYS(wait4)
-SYSCALL(swapoff)
-COMPAT_SYS(sysinfo)
-COMPAT_SYS(ipc)
-SYSCALL(fsync)
-SYS32ONLY(sigreturn)
-PPC_SYS(clone)
-COMPAT_SYS(setdomainname)
-PPC_SYS(newuname)
-SYSCALL(ni_syscall)
-COMPAT_SYS(adjtimex)
-SYSCALL(mprotect)
-SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
-SYSCALL(ni_syscall)
-SYSCALL(init_module)
-SYSCALL(delete_module)
-SYSCALL(ni_syscall)
-SYSCALL(quotactl)
-COMPAT_SYS(getpgid)
-SYSCALL(fchdir)
-SYSCALL(bdflush)
-COMPAT_SYS(sysfs)
-SYSX(ppc64_personality,ppc64_personality,sys_personality)
-SYSCALL(ni_syscall)
-SYSCALL(setfsuid)
-SYSCALL(setfsgid)
-SYSCALL(llseek)
-COMPAT_SYS(getdents)
-SYSX(sys_select,ppc32_select,ppc_select)
-SYSCALL(flock)
-SYSCALL(msync)
-COMPAT_SYS(readv)
-COMPAT_SYS(writev)
-COMPAT_SYS(getsid)
-SYSCALL(fdatasync)
-COMPAT_SYS(sysctl)
-SYSCALL(mlock)
-SYSCALL(munlock)
-SYSCALL(mlockall)
-SYSCALL(munlockall)
-COMPAT_SYS(sched_setparam)
-COMPAT_SYS(sched_getparam)
-COMPAT_SYS(sched_setscheduler)
-COMPAT_SYS(sched_getscheduler)
-SYSCALL(sched_yield)
-COMPAT_SYS(sched_get_priority_max)
-COMPAT_SYS(sched_get_priority_min)
-COMPAT_SYS(sched_rr_get_interval)
-COMPAT_SYS(nanosleep)
-SYSCALL(mremap)
-SYSCALL(setresuid)
-SYSCALL(getresuid)
-SYSCALL(ni_syscall)
-SYSCALL(poll)
-COMPAT_SYS(nfsservctl)
-SYSCALL(setresgid)
-SYSCALL(getresgid)
-COMPAT_SYS(prctl)
-COMPAT_SYS(rt_sigreturn)
-COMPAT_SYS(rt_sigaction)
-COMPAT_SYS(rt_sigprocmask)
-COMPAT_SYS(rt_sigpending)
-COMPAT_SYS(rt_sigtimedwait)
-COMPAT_SYS(rt_sigqueueinfo)
-COMPAT_SYS(rt_sigsuspend)
-COMPAT_SYS(pread64)
-COMPAT_SYS(pwrite64)
-SYSCALL(chown)
-SYSCALL(getcwd)
-SYSCALL(capget)
-SYSCALL(capset)
-COMPAT_SYS(sigaltstack)
-SYSX(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-PPC_SYS(vfork)
-COMPAT_SYS(getrlimit)
-COMPAT_SYS(readahead)
-SYS32ONLY(mmap2)
-SYS32ONLY(truncate64)
-SYS32ONLY(ftruncate64)
-SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
-SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
-SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
-SYSCALL(pciconfig_read)
-SYSCALL(pciconfig_write)
-SYSCALL(pciconfig_iobase)
-SYSCALL(ni_syscall)
-SYSCALL(getdents64)
-SYSCALL(pivot_root)
-SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
-SYSCALL(madvise)
-SYSCALL(mincore)
-SYSCALL(gettid)
-SYSCALL(tkill)
-SYSCALL(setxattr)
-SYSCALL(lsetxattr)
-SYSCALL(fsetxattr)
-SYSCALL(getxattr)
-SYSCALL(lgetxattr)
-SYSCALL(fgetxattr)
-SYSCALL(listxattr)
-SYSCALL(llistxattr)
-SYSCALL(flistxattr)
-SYSCALL(removexattr)
-SYSCALL(lremovexattr)
-SYSCALL(fremovexattr)
-COMPAT_SYS(futex)
-COMPAT_SYS(sched_setaffinity)
-COMPAT_SYS(sched_getaffinity)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYS32ONLY(sendfile64)
-COMPAT_SYS(io_setup)
-SYSCALL(io_destroy)
-COMPAT_SYS(io_getevents)
-COMPAT_SYS(io_submit)
-SYSCALL(io_cancel)
-SYSCALL(set_tid_address)
-SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
-SYSCALL(exit_group)
-SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
-SYSCALL(epoll_create)
-SYSCALL(epoll_ctl)
-SYSCALL(epoll_wait)
-SYSCALL(remap_file_pages)
-SYSX(sys_timer_create,compat_sys_timer_create,sys_timer_create)
-COMPAT_SYS(timer_settime)
-COMPAT_SYS(timer_gettime)
-SYSCALL(timer_getoverrun)
-SYSCALL(timer_delete)
-COMPAT_SYS(clock_settime)
-COMPAT_SYS(clock_gettime)
-COMPAT_SYS(clock_getres)
-COMPAT_SYS(clock_nanosleep)
-SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
-COMPAT_SYS(tgkill)
-COMPAT_SYS(utimes)
-COMPAT_SYS(statfs64)
-COMPAT_SYS(fstatfs64)
-SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
-PPC_SYS(rtas)
-OLDSYS(debug_setcontext)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-COMPAT_SYS(mbind)
-COMPAT_SYS(get_mempolicy)
-COMPAT_SYS(set_mempolicy)
-COMPAT_SYS(mq_open)
-SYSCALL(mq_unlink)
-COMPAT_SYS(mq_timedsend)
-COMPAT_SYS(mq_timedreceive)
-COMPAT_SYS(mq_notify)
-COMPAT_SYS(mq_getsetattr)
-COMPAT_SYS(kexec_load)
-COMPAT_SYS(add_key)
-COMPAT_SYS(request_key)
-COMPAT_SYS(keyctl)
-COMPAT_SYS(waitid)
-COMPAT_SYS(ioprio_set)
-COMPAT_SYS(ioprio_get)
-SYSCALL(inotify_init)
-SYSCALL(inotify_add_watch)
-SYSCALL(inotify_rm_watch)
-SYSCALL(spu_run)
-SYSCALL(spu_create)
-COMPAT_SYS(pselect6)
-COMPAT_SYS(ppoll)
-SYSCALL(unshare)
-SYSCALL(splice)
-SYSCALL(tee)
-SYSCALL(vmsplice)
-COMPAT_SYS(openat)
-SYSCALL(mkdirat)
-SYSCALL(mknodat)
-SYSCALL(fchownat)
-COMPAT_SYS(futimesat)
-SYSX(sys_newfstatat, sys_fstatat64, sys_fstatat64)
-SYSCALL(unlinkat)
-SYSCALL(renameat)
-SYSCALL(linkat)
-SYSCALL(symlinkat)
-SYSCALL(readlinkat)
-SYSCALL(fchmodat)
-SYSCALL(faccessat)
-COMPAT_SYS(get_robust_list)
-COMPAT_SYS(set_robust_list)
-
-/*
- * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c
- * as well when appropriate.
- */
+#include <asm/systbl.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 24e3ad756de0..d20907561f46 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -76,7 +76,6 @@
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
-extern int piranha_simulator;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
@@ -858,42 +857,50 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
-void __init generic_calibrate_decr(void)
+static int __init get_freq(char *name, int cells, unsigned long *val)
{
struct device_node *cpu;
unsigned int *fp;
- int node_found;
+ int found = 0;
- /*
- * The cpu node should have a timebase-frequency property
- * to tell us the rate at which the decrementer counts.
- */
+ /* The cpu node should have timebase and clock frequency properties */
cpu = of_find_node_by_type(NULL, "cpu");
- ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
- node_found = 0;
if (cpu) {
- fp = (unsigned int *)get_property(cpu, "timebase-frequency",
- NULL);
+ fp = (unsigned int *)get_property(cpu, name, NULL);
if (fp) {
- node_found = 1;
- ppc_tb_freq = *fp;
+ found = 1;
+ *val = 0;
+ while (cells--)
+ *val = (*val << 32) | *fp++;
}
+
+ of_node_put(cpu);
}
- if (!node_found)
+
+ return found;
+}
+
+void __init generic_calibrate_decr(void)
+{
+ ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
+
+ if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
+ !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
+
printk(KERN_ERR "WARNING: Estimating decrementer frequency "
"(not found)\n");
+ }
- ppc_proc_freq = DEFAULT_PROC_FREQ;
- node_found = 0;
- if (cpu) {
- fp = (unsigned int *)get_property(cpu, "clock-frequency",
- NULL);
- if (fp) {
- node_found = 1;
- ppc_proc_freq = *fp;
- }
+ ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */
+
+ if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
+ !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
+
+ printk(KERN_ERR "WARNING: Estimating processor frequency "
+ "(not found)\n");
}
+
#ifdef CONFIG_BOOKE
/* Set the time base to zero */
mtspr(SPRN_TBWL, 0);
@@ -905,11 +912,6 @@ void __init generic_calibrate_decr(void)
/* Enable decrementer interrupt */
mtspr(SPRN_TCR, TCR_DIE);
#endif
- if (!node_found)
- printk(KERN_ERR "WARNING: Estimating processor frequency "
- "(not found)\n");
-
- of_node_put(cpu);
}
unsigned long get_boot_time(void)
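Note: the new get_freq() folds an n-cell property into one value with *val = (*val << 32) | *fp++, so the two-cell "ibm,extended-*" properties give a full 64-bit frequency while the legacy one-cell properties still work. A minimal standalone sketch of that fold, with made-up cell contents:

/* Standalone sketch of the cell fold in get_freq(); values are examples. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fold_cells(const uint32_t *fp, int cells)
{
	uint64_t val = 0;

	while (cells--)
		val = (val << 32) | *fp++;
	return val;
}

int main(void)
{
	uint32_t one_cell[]  = { 0x1FCA0560 };              /* fits in 32 bits */
	uint32_t two_cells[] = { 0x00000001, 0x2A05F200 };  /* needs 64 bits   */

	printf("1 cell : %llu\n", (unsigned long long)fold_cells(one_cell, 1));
	printf("2 cells: %llu\n", (unsigned long long)fold_cells(two_cells, 2));
	return 0;
}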
@@ -945,9 +947,9 @@ void __init time_init(void)
} else {
/* Normal PowerPC with timebase register */
ppc_md.calibrate_decr();
- printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+ printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
- printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
+ printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
tb_last_stamp = tb_last_jiffy = get_tb();
}
@@ -1010,10 +1012,7 @@ void __init time_init(void)
tb_to_ns_scale = scale;
tb_to_ns_shift = shift;
-#ifdef CONFIG_PPC_ISERIES
- if (!piranha_simulator)
-#endif
- tm = get_boot_time();
+ tm = get_boot_time();
write_seqlock_irqsave(&xtime_lock, flags);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 064a52564692..91a6e04d9741 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -658,7 +658,7 @@ static int emulate_instruction(struct pt_regs *regs)
u32 instword;
u32 rd;
- if (!user_mode(regs))
+ if (!user_mode(regs) || (regs->msr & MSR_LE))
return -EINVAL;
CHECK_FULL_REGS(regs);
@@ -805,9 +805,11 @@ void __kprobes program_check_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs)
{
- int fixed;
+ int fixed = 0;
- fixed = fix_alignment(regs);
+ /* we don't implement logging of alignment exceptions */
+ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+ fixed = fix_alignment(regs);
if (fixed == 1) {
regs->nip += 4; /* skip over emulated instruction */
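Note: the new check makes alignment fixups honour a per-thread policy instead of always emulating. Assuming the usual prctl(PR_SET_UNALIGN, ...) interface is what populates thread.align_ctl here (a sketch, not taken from this patch), a process that prefers SIGBUS over silent fixups would request it like this from userspace:

/* Userspace sketch: ask for SIGBUS on unaligned accesses. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_UNALIGN_SIGBUS
#define PR_UNALIGN_SIGBUS 2	/* from linux/prctl.h */
#endif

int main(void)
{
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_UNALIGN)");
		return 1;
	}
	/* From here on, an unaligned access raises SIGBUS rather than
	 * being transparently fixed up by alignment_exception(). */
	return 0;
}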
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 3774e80094f5..67d9fd9ae2b5 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/console.h>
+#include <linux/init.h>
#include <asm/processor.h>
#include <asm/udbg.h>
@@ -141,12 +142,14 @@ static int early_console_initialized;
void __init disable_early_printk(void)
{
-#if 1
if (!early_console_initialized)
return;
+ if (strstr(saved_command_line, "udbg-immortal")) {
+ printk(KERN_INFO "early console immortal !\n");
+ return;
+ }
unregister_console(&udbg_console);
early_console_initialized = 0;
-#endif
}
/* called by setup_system */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 573afb68d69e..bc3e15be3087 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct vm_area_struct *vma;
unsigned long vdso_pages;
unsigned long vdso_base;
+ int rc;
#ifdef CONFIG_PPC64
if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_base = VDSO32_MBASE;
#endif
- current->thread.vdso_base = 0;
+ current->mm->context.vdso_base = 0;
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
*/
if (vdso_pages == 0)
return 0;
-
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (vma == NULL)
- return -ENOMEM;
-
- memset(vma, 0, sizeof(*vma));
-
/* Add a page to the vdso size for the data page */
vdso_pages ++;
@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
*/
+ down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_pages << PAGE_SHIFT, 0, 0);
- if (vdso_base & ~PAGE_MASK) {
- kmem_cache_free(vm_area_cachep, vma);
- return (int)vdso_base;
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
}
- current->thread.vdso_base = vdso_base;
+ /* Allocate a VMA structure and fill it up */
+ vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ if (vma == NULL) {
+ rc = -ENOMEM;
+ goto fail_mmapsem;
+ }
vma->vm_mm = mm;
- vma->vm_start = current->thread.vdso_base;
+ vma->vm_start = vdso_base;
vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
/*
@@ -282,23 +282,38 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* It's fine to use that for setting breakpoints in the vDSO code
* pages though
*/
- vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
vma->vm_ops = &vdso_vmops;
- down_write(&mm->mmap_sem);
- if (insert_vm_struct(mm, vma)) {
- up_write(&mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
- return -ENOMEM;
- }
+ /* Insert new VMA */
+ rc = insert_vm_struct(mm, vma);
+ if (rc)
+ goto fail_vma;
+
+ /* Put vDSO base into mm struct and account for memory usage */
+ current->mm->context.vdso_base = vdso_base;
mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
up_write(&mm->mmap_sem);
-
return 0;
+
+ fail_vma:
+ kmem_cache_free(vm_area_cachep, vma);
+ fail_mmapsem:
+ up_write(&mm->mmap_sem);
+ return rc;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+ return "[vdso]";
+ return NULL;
}
+
+
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 66b3d03c5fa5..9416b4ab92ec 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -53,12 +53,12 @@ fpenable:
stfd fr31,8(r1)
LDCONST(fr1, fpzero)
mffs fr31
- mtfsf 0xff,fr1
+ MTFSF_L(fr1)
blr
fpdisable:
mtlr r12
- mtfsf 0xff,fr31
+ MTFSF_L(fr31)
lfd fr31,8(r1)
lfd fr1,16(r1)
lfd fr0,24(r1)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 971020cf3f7d..cdf5867838a6 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -13,27 +13,116 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/types.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/kobject.h>
+
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
-
-static const struct vio_device_id *vio_match_device(
- const struct vio_device_id *, const struct vio_dev *);
-
-struct vio_dev vio_bus_device = { /* fake "parent" device */
+#include <asm/firmware.h>
+#include <asm/tce.h>
+#include <asm/abs_addr.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_call_xm.h>
+#include <asm/iseries/iommu.h>
+
+extern struct subsystem devices_subsys; /* needed for vio_find_name() */
+
+static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = vio_bus_device.dev.bus_id,
.type = "",
.dev.bus_id = "vio",
.dev.bus = &vio_bus_type,
};
-static struct vio_bus_ops vio_bus_ops;
+#ifdef CONFIG_PPC_ISERIES
+struct device *iSeries_vio_dev = &vio_bus_device.dev;
+EXPORT_SYMBOL(iSeries_vio_dev);
+
+static struct iommu_table veth_iommu_table;
+static struct iommu_table vio_iommu_table;
+
+static void __init iommu_vio_init(void)
+{
+ iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
+ veth_iommu_table.it_size /= 2;
+ vio_iommu_table = veth_iommu_table;
+ vio_iommu_table.it_offset += veth_iommu_table.it_size;
+
+ if (!iommu_init_table(&veth_iommu_table, -1))
+ printk("Virtual Bus VETH TCE table failed.\n");
+ if (!iommu_init_table(&vio_iommu_table, -1))
+ printk("Virtual Bus VIO TCE table failed.\n");
+}
+#endif
+
+static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
+{
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ if (strcmp(dev->type, "network") == 0)
+ return &veth_iommu_table;
+ return &vio_iommu_table;
+ } else
+#endif
+ {
+ unsigned char *dma_window;
+ struct iommu_table *tbl;
+ unsigned long offset, size;
+
+ dma_window = get_property(dev->dev.platform_data,
+ "ibm,my-dma-window", NULL);
+ if (!dma_window)
+ return NULL;
+
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+
+ of_parse_dma_window(dev->dev.platform_data, dma_window,
+ &tbl->it_index, &offset, &size);
+
+ /* TCE table size - measured in tce entries */
+ tbl->it_size = size >> PAGE_SHIFT;
+ /* offset for VIO should always be 0 */
+ tbl->it_offset = offset >> PAGE_SHIFT;
+ tbl->it_busno = 0;
+ tbl->it_type = TCE_VB;
+
+ return iommu_init_table(tbl, -1);
+ }
+}
+
+/**
+ * vio_match_device: - Tell if a VIO device has a matching
+ * VIO device id structure.
+ * @ids: array of VIO device id structures to search in
+ * @dev: the VIO device structure to match against
+ *
+ * Used by a driver to check whether a VIO device present in the
+ * system is in its list of supported devices. Returns the matching
+ * vio_device_id structure or NULL if there is no match.
+ */
+static const struct vio_device_id *vio_match_device(
+ const struct vio_device_id *ids, const struct vio_dev *dev)
+{
+ while (ids->type[0] != '\0') {
+ if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
+ device_is_compatible(dev->dev.platform_data, ids->compat))
+ return ids;
+ ids++;
+ }
+ return NULL;
+}
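Note: the matcher above walks a table of (type, compat) pairs until it hits an entry with an empty type, comparing the device_type prefix and the OF "compatible" property. A driver-side id table therefore looks roughly like this sketch; the strings are illustrative placeholders, not a real device:

/* Sketch of an id table consumed by vio_match_device(). */
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static struct vio_device_id example_vio_ids[] = {
	{ "network", "IBM,example-vnic" },	/* type prefix + "compatible" */
	{ "vserial", "example,console" },
	{ "", "" }				/* empty type ends the table  */
};
MODULE_DEVICE_TABLE(vio, example_vio_ids);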
/*
* Convert from struct device to struct vio_dev and pass to driver.
@@ -106,35 +195,110 @@ void vio_unregister_driver(struct vio_driver *viodrv)
}
EXPORT_SYMBOL(vio_unregister_driver);
+/* vio_dev refcount hit 0 */
+static void __devinit vio_dev_release(struct device *dev)
+{
+ if (dev->platform_data) {
+ /* XXX free TCE table */
+ of_node_put(dev->platform_data);
+ }
+ kfree(to_vio_dev(dev));
+}
+
/**
- * vio_match_device: - Tell if a VIO device has a matching
- * VIO device id structure.
- * @ids: array of VIO device id structures to search in
- * @dev: the VIO device structure to match against
+ * vio_register_device_node: - Register a new vio device.
+ * @of_node: The OF node for this device.
*
- * Used by a driver to check whether a VIO device present in the
- * system is in its list of supported devices. Returns the matching
- * vio_device_id structure or NULL if there is no match.
+ * Creates and initializes a vio_dev structure from the data in
+ * of_node (dev.platform_data) and adds it to the list of virtual devices.
+ * Returns a pointer to the created vio_dev or NULL if node has
+ * NULL device_type or compatible fields.
*/
-static const struct vio_device_id *vio_match_device(
- const struct vio_device_id *ids, const struct vio_dev *dev)
+struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
{
- while (ids->type[0] != '\0') {
- if (vio_bus_ops.match(ids, dev))
- return ids;
- ids++;
+ struct vio_dev *viodev;
+ unsigned int *unit_address;
+ unsigned int *irq_p;
+
+ /* we need the 'device_type' property in order to match with drivers */
+ if (of_node->type == NULL) {
+ printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
+ __FUNCTION__,
+ of_node->name ? of_node->name : "<unknown>");
+ return NULL;
}
- return NULL;
+
+ unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
+ if (unit_address == NULL) {
+ printk(KERN_WARNING "%s: node %s missing 'reg'\n",
+ __FUNCTION__,
+ of_node->name ? of_node->name : "<unknown>");
+ return NULL;
+ }
+
+ /* allocate a vio_dev for this node */
+ viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
+ if (viodev == NULL)
+ return NULL;
+
+ viodev->dev.platform_data = of_node_get(of_node);
+
+ viodev->irq = NO_IRQ;
+ irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
+ if (irq_p) {
+ int virq = virt_irq_create_mapping(*irq_p);
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", of_node->full_name);
+ } else
+ viodev->irq = irq_offset_up(virq);
+ }
+
+ snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
+ viodev->name = of_node->name;
+ viodev->type = of_node->type;
+ viodev->unit_address = *unit_address;
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ unit_address = (unsigned int *)get_property(of_node,
+ "linux,unit_address", NULL);
+ if (unit_address != NULL)
+ viodev->unit_address = *unit_address;
+ }
+ viodev->iommu_table = vio_build_iommu_table(viodev);
+
+ /* init generic 'struct device' fields: */
+ viodev->dev.parent = &vio_bus_device.dev;
+ viodev->dev.bus = &vio_bus_type;
+ viodev->dev.release = vio_dev_release;
+
+ /* register with generic device framework */
+ if (device_register(&viodev->dev)) {
+ printk(KERN_ERR "%s: failed to register device %s\n",
+ __FUNCTION__, viodev->dev.bus_id);
+ /* XXX free TCE table */
+ kfree(viodev);
+ return NULL;
+ }
+
+ return viodev;
}
+EXPORT_SYMBOL(vio_register_device_node);
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
-int __init vio_bus_init(struct vio_bus_ops *ops)
+static int __init vio_bus_init(void)
{
int err;
+ struct device_node *node_vroot;
- vio_bus_ops = *ops;
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ iommu_vio_init();
+ vio_bus_device.iommu_table = &vio_iommu_table;
+ iSeries_vio_dev = &vio_bus_device.dev;
+ }
+#endif
err = bus_register(&vio_bus_type);
if (err) {
@@ -153,47 +317,48 @@ int __init vio_bus_init(struct vio_bus_ops *ops)
return err;
}
- return 0;
-}
+ node_vroot = find_devices("vdevice");
+ if (node_vroot) {
+ struct device_node *of_node;
+
+ /*
+ * Create struct vio_devices for each virtual device in
+ * the device tree. Drivers will associate with them later.
+ */
+ for (of_node = node_vroot->child; of_node != NULL;
+ of_node = of_node->sibling) {
+ printk(KERN_DEBUG "%s: processing %p\n",
+ __FUNCTION__, of_node);
+ vio_register_device_node(of_node);
+ }
+ }
-/* vio_dev refcount hit 0 */
-static void __devinit vio_dev_release(struct device *dev)
-{
- if (vio_bus_ops.release_device)
- vio_bus_ops.release_device(dev);
- kfree(to_vio_dev(dev));
+ return 0;
}
+__initcall(vio_bus_init);
-static ssize_t viodev_show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
-DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
-struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev)
+static ssize_t devspec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- /* init generic 'struct device' fields: */
- viodev->dev.parent = &vio_bus_device.dev;
- viodev->dev.bus = &vio_bus_type;
- viodev->dev.release = vio_dev_release;
-
- /* register with generic device framework */
- if (device_register(&viodev->dev)) {
- printk(KERN_ERR "%s: failed to register device %s\n",
- __FUNCTION__, viodev->dev.bus_id);
- return NULL;
- }
- device_create_file(&viodev->dev, &dev_attr_name);
+ struct device_node *of_node = dev->platform_data;
- return viodev;
+ return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
+static struct device_attribute vio_dev_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(devspec),
+ __ATTR_NULL
+};
+
void __devinit vio_unregister_device(struct vio_dev *viodev)
{
- if (vio_bus_ops.unregister_device)
- vio_bus_ops.unregister_device(viodev);
- device_remove_file(&viodev->dev, &dev_attr_name);
device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
@@ -229,7 +394,7 @@ static void *vio_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
- dma_handle, ~0ul, flag);
+ dma_handle, ~0ul, flag, -1);
}
static void vio_free_coherent(struct device *dev, size_t size,
@@ -267,22 +432,23 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
+ struct device_node *dn = dev->platform_data;
char *cp;
int length;
if (!num_envp)
return -ENOMEM;
- if (!vio_dev->dev.platform_data)
+ if (!dn)
return -ENODEV;
- cp = (char *)get_property(vio_dev->dev.platform_data, "compatible", &length);
+ cp = (char *)get_property(dn, "compatible", &length);
if (!cp)
return -ENODEV;
envp[0] = buffer;
length = scnprintf(buffer, buffer_size, "MODALIAS=vio:T%sS%s",
vio_dev->type, cp);
- if (buffer_size - length <= 0)
+ if ((buffer_size - length) <= 0)
return -ENOMEM;
envp[1] = NULL;
return 0;
@@ -290,9 +456,81 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
struct bus_type vio_bus_type = {
.name = "vio",
+ .dev_attrs = vio_dev_attrs,
.uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
.shutdown = vio_bus_shutdown,
};
+
+/**
+ * vio_get_attribute: - get attribute for virtual device
+ * @vdev: The vio device to get property.
+ * @which: The property/attribute to be extracted.
+ * @length: Pointer to length of returned data size (unused if NULL).
+ *
+ * Calls prom.c's get_property() to return the value of the
+ * attribute specified by @which
+*/
+const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
+{
+ return get_property(vdev->dev.platform_data, which, length);
+}
+EXPORT_SYMBOL(vio_get_attribute);
+
+#ifdef CONFIG_PPC_PSERIES
+/* vio_find_name() - internal because only vio.c knows how we formatted the
+ * kobject name
+ * XXX once vio_bus_type.devices is actually used as a kset in
+ * drivers/base/bus.c, this function should be removed in favor of
+ * "device_find(kobj_name, &vio_bus_type)"
+ */
+static struct vio_dev *vio_find_name(const char *kobj_name)
+{
+ struct kobject *found;
+
+ found = kset_find_obj(&devices_subsys.kset, kobj_name);
+ if (!found)
+ return NULL;
+
+ return to_vio_dev(container_of(found, struct device, kobj));
+}
+
+/**
+ * vio_find_node - find an already-registered vio_dev
+ * @vnode: device_node of the virtual device we're looking for
+ */
+struct vio_dev *vio_find_node(struct device_node *vnode)
+{
+ uint32_t *unit_address;
+ char kobj_name[BUS_ID_SIZE];
+
+ /* construct the kobject name from the device node */
+ unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
+ if (!unit_address)
+ return NULL;
+ snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
+
+ return vio_find_name(kobj_name);
+}
+EXPORT_SYMBOL(vio_find_node);
+
+int vio_enable_interrupts(struct vio_dev *dev)
+{
+ int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
+ if (rc != H_SUCCESS)
+ printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL(vio_enable_interrupts);
+
+int vio_disable_interrupts(struct vio_dev *dev)
+{
+ int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
+ if (rc != H_SUCCESS)
+ printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL(vio_disable_interrupts);
+#endif /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index fe79c2584cb0..8b25953dc4f0 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -93,6 +93,11 @@ SECTIONS
__ptov_table_begin = .;
*(.ptov_fixup);
__ptov_table_end = .;
+#ifdef CONFIG_PPC_ISERIES
+ __dt_strings_start = .;
+ *(.dt_strings);
+ __dt_strings_end = .;
+#endif
}
. = ALIGN(16);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 34f5c2e074c9..ff7096458249 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -2,12 +2,15 @@
# Makefile for ppc-specific library files..
#
+ifeq ($(CONFIG_PPC64),y)
+EXTRA_CFLAGS += -mno-minimal-toc
+endif
+
ifeq ($(CONFIG_PPC_MERGE),y)
obj-y := string.o strcase.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
endif
-obj-y += bitops.o
obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
strcase.o
diff --git a/arch/powerpc/lib/bitops.c b/arch/powerpc/lib/bitops.c
deleted file mode 100644
index f68ad71a0187..000000000000
--- a/arch/powerpc/lib/bitops.c
+++ /dev/null
@@ -1,150 +0,0 @@
-#include <linux/types.h>
-#include <linux/module.h>
-#include <asm/byteorder.h>
-#include <asm/bitops.h>
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-EXPORT_SYMBOL(find_next_bit);
-
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (BITS_PER_LONG - offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
-
-static inline unsigned int ext2_ilog2(unsigned int x)
-{
- int lz;
-
- asm("cntlzw %0,%1": "=r"(lz):"r"(x));
- return 31 - lz;
-}
-
-static inline unsigned int ext2_ffz(unsigned int x)
-{
- u32 rc;
- if ((x = ~x) == 0)
- return 32;
- rc = ext2_ilog2(x & -x);
- return rc;
-}
-
-unsigned long find_next_zero_le_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5);
- unsigned int result = offset & ~31;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31;
- if (offset) {
- tmp = cpu_to_le32p(p++);
- tmp |= ~0U >> (32 - offset); /* bug or feature ? */
- if (size < 32)
- goto found_first;
- if (tmp != ~0)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = cpu_to_le32p(p++)) != ~0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = cpu_to_le32p(p);
-found_first:
- tmp |= ~0 << size;
- if (tmp == ~0) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ext2_ffz(tmp);
-}
-EXPORT_SYMBOL(find_next_zero_le_bit);
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ea469eefa146..94255beeecd3 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -74,12 +74,6 @@ _GLOBAL(hash_page_sync)
*/
.text
_GLOBAL(hash_page)
-#ifdef CONFIG_PPC64BRIDGE
- mfmsr r0
- clrldi r0,r0,1 /* make sure it's in 32-bit mode */
- MTMSRD(r0)
- isync
-#endif
tophys(r7,0) /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
addis r8,r7,mmu_hash_lock@h
@@ -285,7 +279,6 @@ Hash_base = 0xc0180000
Hash_bits = 12 /* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
-#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE 8
#define PTEG_SIZE 64
@@ -299,21 +292,6 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
#define SET_V(r) oris r,r,PTE_V@h
#define CLR_V(r,t) rlwinm r,r,0,1,31
-#else
-/* defines for the PTE format for 64-bit PPCs */
-#define PTE_SIZE 16
-#define PTEG_SIZE 128
-#define LG_PTEG_SIZE 7
-#define LDPTEu ldu
-#define STPTE std
-#define CMPPTE cmpd
-#define PTE_H 2
-#define PTE_V 1
-#define TST_V(r) andi. r,r,PTE_V
-#define SET_V(r) ori r,r,PTE_V
-#define CLR_V(r,t) li t,PTE_V; andc r,r,t
-#endif /* CONFIG_PPC64BRIDGE */
-
#define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT 31-LG_PTEG_SIZE
@@ -331,14 +309,8 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
/* Construct the high word of the PPC-style PTE (r5) */
-#ifndef CONFIG_PPC64BRIDGE
rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
- clrlwi r3,r3,8 /* reduce vsid to 24 bits */
- sldi r5,r3,12 /* shift vsid into position */
- rlwimi r5,r4,16,20,24 /* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
SET_V(r5) /* set V (valid) bit */
/* Get the address of the primary PTE group in the hash table (r3) */
@@ -516,14 +488,8 @@ _GLOBAL(flush_hash_pages)
add r3,r3,r0 /* note code below trims to 24 bits */
/* Construct the high word of the PPC-style PTE (r11) */
-#ifndef CONFIG_PPC64BRIDGE
rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
- clrlwi r3,r3,8 /* reduce vsid to 24 bits */
- sldi r11,r3,12 /* shift vsid into position */
- rlwimi r11,r4,16,20,24 /* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
SET_V(r11) /* set V (valid) bit */
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index e0d02c4a2615..52e914238959 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -136,6 +136,7 @@ _GLOBAL(__hash_page_4K)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+ ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -368,6 +369,7 @@ _GLOBAL(__hash_page_4K)
rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
or r30,r30,r31
ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+ oris r30,r30,_PAGE_COMBO@h
/* Write the linux PTE atomically (setting busy) */
stdcx. r30,0,r6
bne- 1b
@@ -400,6 +402,7 @@ _GLOBAL(__hash_page_4K)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+ ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -426,6 +429,14 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
andi. r0,r31,_PAGE_HASHPTE
li r26,0 /* Default hidx */
beq htab_insert_pte
+
+ /*
+ * Check if the pte was already inserted into the hash table
+ * as a 64k HW page, and invalidate the 64k HPTE if so.
+ */
+ andis. r0,r31,_PAGE_COMBO@h
+ beq htab_inval_old_hpte
+
ld r6,STK_PARM(r6)(r1)
ori r26,r6,0x8000 /* Load the hidx mask */
ld r26,0(r26)
@@ -496,6 +507,19 @@ _GLOBAL(htab_call_hpte_remove)
/* Try all again */
b htab_insert_pte
+ /*
+ * Call out to C code to invalidate a 64k HW HPTE that is
+ * useless now that the segment has been switched to 4k pages.
+ */
+htab_inval_old_hpte:
+ mr r3,r29 /* virtual addr */
+ mr r4,r31 /* PTE.pte */
+ li r5,0 /* PTE.hidx */
+ li r6,MMU_PAGE_64K /* psize */
+ ld r7,STK_PARM(r8)(r1) /* local */
+ bl .flush_hash_page
+ b htab_insert_pte
+
htab_bail_ok:
li r3,0
b htab_bail
@@ -636,6 +660,12 @@ _GLOBAL(__hash_page_64K)
* is changing this PTE anyway and might hash it.
*/
bne- ht64_bail_ok
+BEGIN_FTR_SECTION
+ /* Check if PTE has the cache-inhibit bit set */
+ andi. r0,r31,_PAGE_NO_CACHE
+ /* If so, bail out and refault as a 4k page */
+ bne- ht64_bail_ok
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
/* Prepare new PTE value (turn access RW into DIRTY, then
* add BUSY,HASHPTE and ACCESSED)
*/
@@ -671,6 +701,7 @@ _GLOBAL(__hash_page_64K)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+ ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
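
The hunks above tag a PTE with _PAGE_COMBO once it has been hashed as 4k subpages, so a later fault can tell whether a stale 64k HPTE is still sitting in the hash table and must be flushed before the 4k entry goes in. A minimal standalone sketch of that decision, using made-up flag values rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical flag values, for illustration only. */
#define PTE_HASHPTE 0x1000UL  /* PTE has an entry in the hash table        */
#define PTE_COMBO   0x2000UL  /* ...and that entry was made as 4k subpages */

/* True when an old 64k HPTE must be invalidated before inserting a new
 * 4k one: the PTE was hashed earlier, but not as a 4k combo page. */
static bool must_invalidate_old_64k(uint64_t pte)
{
        return (pte & PTE_HASHPTE) && !(pte & PTE_COMBO);
}
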
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 994856e55b7c..a0f3cbd00d39 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -238,7 +238,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" -> hit\n");
/* Update the HPTE */
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N));
+ (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
native_unlock_hpte(hptep);
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c006d9039633..d03fd2b4445e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,10 +92,15 @@ unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
/* There are definitions of page sizes arrays to be used when none
* is provided by the firmware.
@@ -308,20 +313,31 @@ static void __init htab_init_page_sizes(void)
else if (mmu_psize_defs[MMU_PAGE_1M].shift)
mmu_linear_psize = MMU_PAGE_1M;
+#ifdef CONFIG_PPC_64K_PAGES
/*
* Pick a size for the ordinary pages. Default is 4K, we support
- * 64K if cache inhibited large pages are supported by the
- * processor
+ * 64K for user mappings and vmalloc if supported by the processor.
+ * We only use 64k for ioremap if the processor
+ * (and firmware) support cache-inhibited large pages.
+ * If not, we use 4k and set mmu_ci_restrictions so that
+ * hash_page knows to switch processes that use cache-inhibited
+ * mappings to 4k pages.
*/
-#ifdef CONFIG_PPC_64K_PAGES
- if (mmu_psize_defs[MMU_PAGE_64K].shift &&
- cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+ if (mmu_psize_defs[MMU_PAGE_64K].shift) {
mmu_virtual_psize = MMU_PAGE_64K;
+ mmu_vmalloc_psize = MMU_PAGE_64K;
+ if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+ mmu_io_psize = MMU_PAGE_64K;
+ else
+ mmu_ci_restrictions = 1;
+ }
#endif
- printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+ printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+ "virtual = %d, io = %d\n",
mmu_psize_defs[mmu_linear_psize].shift,
- mmu_psize_defs[mmu_virtual_psize].shift);
+ mmu_psize_defs[mmu_virtual_psize].shift,
+ mmu_psize_defs[mmu_io_psize].shift);
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
@@ -556,6 +572,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
pte_t *ptep;
cpumask_t tmp;
int rc, user_region = 0, local = 0;
+ int psize;
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
@@ -575,10 +592,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
return 1;
}
vsid = get_vsid(mm->context.id, ea);
+ psize = mm->context.user_psize;
break;
case VMALLOC_REGION_ID:
mm = &init_mm;
vsid = get_kernel_vsid(ea);
+ if (ea < VMALLOC_END)
+ psize = mmu_vmalloc_psize;
+ else
+ psize = mmu_io_psize;
break;
default:
/* Not a valid range
@@ -629,7 +651,40 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
#ifndef CONFIG_PPC_64K_PAGES
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_virtual_psize == MMU_PAGE_64K)
+ if (mmu_ci_restrictions) {
+ /* If this PTE is non-cacheable, switch to 4k */
+ if (psize == MMU_PAGE_64K &&
+ (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+ if (user_region) {
+ psize = MMU_PAGE_4K;
+ mm->context.user_psize = MMU_PAGE_4K;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[MMU_PAGE_4K].sllp;
+ } else if (ea < VMALLOC_END) {
+ /*
+ * some driver did a non-cacheable mapping
+ * in vmalloc space, so switch vmalloc
+ * to 4k pages
+ */
+ printk(KERN_ALERT "Reducing vmalloc segment "
+ "to 4kB pages because of "
+ "non-cacheable mapping\n");
+ psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+ }
+ }
+ if (user_region) {
+ if (psize != get_paca()->context.user_psize) {
+ get_paca()->context = mm->context;
+ slb_flush_and_rebolt();
+ }
+ } else if (get_paca()->vmalloc_sllp !=
+ mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+ get_paca()->vmalloc_sllp =
+ mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ slb_flush_and_rebolt();
+ }
+ }
+ if (psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
else
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -681,7 +736,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
#ifndef CONFIG_PPC_64K_PAGES
__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_virtual_psize == MMU_PAGE_64K)
+ if (mmu_ci_restrictions) {
+ /* If this PTE is non-cacheable, switch to 4k */
+ if (mm->context.user_psize == MMU_PAGE_64K &&
+ (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+ mm->context.user_psize = MMU_PAGE_4K;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[MMU_PAGE_4K].sllp;
+ get_paca()->context = mm->context;
+ slb_flush_and_rebolt();
+ }
+ }
+ if (mm->context.user_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local);
else
__hash_page_4K(ea, access, vsid, ptep, trap, local);
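
The hash_page() change above is the other half of the same feature: on CPUs that cannot do cache-inhibited 64k pages (mmu_ci_restrictions), the first fault on a non-cacheable PTE demotes the whole segment, either the user context or the vmalloc region, to 4k pages and rebolts the SLB. A simplified, userspace-compilable sketch of just the size decision, with hypothetical names standing in for the kernel's constants:

#include <stdbool.h>
#include <stdint.h>

enum psize { PSIZE_4K, PSIZE_64K };     /* stand-ins for MMU_PAGE_4K/64K   */

#define PTE_NO_CACHE 0x0020UL           /* hypothetical _PAGE_NO_CACHE bit */

/* Pick the page size to hash this fault with.  *demoted is set when the
 * segment has to fall back to 4k, i.e. when the caller would also have
 * to update the context and call slb_flush_and_rebolt(). */
static enum psize pick_psize(enum psize cur, uint64_t pte,
                             bool ci_restricted, bool *demoted)
{
        *demoted = false;
        if (ci_restricted && cur == PSIZE_64K && (pte & PTE_NO_CACHE)) {
                *demoted = true;
                return PSIZE_4K;
        }
        return cur;
}
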
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 417d58518558..8b6f522655a6 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -89,20 +89,25 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
return lmb_addrs_adjacent(base1, size1, base2, size2);
}
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
- unsigned long r1, unsigned long r2)
+static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
unsigned long i;
- rgn->region[r1].size += rgn->region[r2].size;
- for (i=r2; i < rgn->cnt-1; i++) {
- rgn->region[i].base = rgn->region[i+1].base;
- rgn->region[i].size = rgn->region[i+1].size;
+ for (i = r; i < rgn->cnt - 1; i++) {
+ rgn->region[i].base = rgn->region[i + 1].base;
+ rgn->region[i].size = rgn->region[i + 1].size;
}
rgn->cnt--;
}
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+ unsigned long r1, unsigned long r2)
+{
+ rgn->region[r1].size += rgn->region[r2].size;
+ lmb_remove_region(rgn, r2);
+}
+
/* This routine called with relocation disabled. */
void __init lmb_init(void)
{
@@ -294,17 +299,16 @@ unsigned long __init lmb_end_of_DRAM(void)
return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
-/*
- * Truncate the lmb list to memory_limit if it's set
- * You must call lmb_analyze() after this.
- */
+/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
unsigned long i, limit;
+ struct lmb_property *p;
if (! memory_limit)
return;
+ /* Truncate the lmb regions to satisfy the memory limit. */
limit = memory_limit;
for (i = 0; i < lmb.memory.cnt; i++) {
if (limit > lmb.memory.region[i].size) {
@@ -316,4 +320,21 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
lmb.memory.cnt = i + 1;
break;
}
+
+ lmb.rmo_size = lmb.memory.region[0].size;
+
+ /* And truncate any reserves above the limit also. */
+ for (i = 0; i < lmb.reserved.cnt; i++) {
+ p = &lmb.reserved.region[i];
+
+ if (p->base > memory_limit)
+ p->size = 0;
+ else if ((p->base + p->size) > memory_limit)
+ p->size = memory_limit - p->base;
+
+ if (p->size == 0) {
+ lmb_remove_region(&lmb.reserved, i);
+ i--;
+ }
+ }
}
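
The new lmb_remove_region() is plain array compaction, and the tail added to lmb_enforce_memory_limit() clips or drops reserved regions that end up above the limit. An equivalent self-contained sketch (struct and function names are placeholders):

#include <stdint.h>

struct region { uint64_t base, size; };

/* Delete element r by shifting the tail of the array down one slot. */
static void remove_region(struct region *rgn, unsigned long *cnt,
                          unsigned long r)
{
        unsigned long i;

        for (i = r; i < *cnt - 1; i++)
                rgn[i] = rgn[i + 1];
        (*cnt)--;
}

/* Clip every region to 'limit'; drop regions that start above it.  The
 * index is re-examined after a removal, exactly as in the patch. */
static void enforce_limit(struct region *rgn, unsigned long *cnt,
                          uint64_t limit)
{
        unsigned long i;

        for (i = 0; i < *cnt; i++) {
                if (rgn[i].base > limit)
                        rgn[i].size = 0;
                else if (rgn[i].base + rgn[i].size > limit)
                        rgn[i].size = limit - rgn[i].base;

                if (rgn[i].size == 0) {
                        remove_region(rgn, cnt, i);
                        i--;
                }
        }
}
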
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 741dd8802d49..69f3b9a20beb 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -299,9 +299,9 @@ void __init paging_init(void)
kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
- printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+ printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
- printk(KERN_INFO "Memory hole size: %ldMB\n",
+ printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
/*
* All pages are DMA-able so we put them all in the DMA zone.
@@ -380,7 +380,7 @@ void __init mem_init(void)
totalhigh_pages++;
}
totalram_pages += totalhigh_pages;
- printk(KERN_INFO "High memory: %luk\n",
+ printk(KERN_DEBUG "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* CONFIG_HIGHMEM */
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index a8816e0f6a86..e326e4249e1a 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -30,7 +30,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
#ifdef FEW_CONTEXTS
atomic_t nr_free_contexts;
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 714a84dd8d5d..65d18dca266f 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -49,6 +49,9 @@ again:
}
mm->context.id = index;
+ mm->context.user_psize = mmu_virtual_psize;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[mmu_virtual_psize].sllp;
return 0;
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 092355f37399..aa98cb3b59d8 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -487,9 +487,9 @@ static void __init setup_nonnuma(void)
unsigned long total_ram = lmb_phys_mem_size();
unsigned int i;
- printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+ printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
- printk(KERN_INFO "Memory hole size: %ldMB\n",
+ printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
for (i = 0; i < lmb.memory.cnt; ++i)
@@ -507,7 +507,7 @@ void __init dump_numa_cpu_topology(void)
return;
for_each_online_node(node) {
- printk(KERN_INFO "Node %d CPUs:", node);
+ printk(KERN_DEBUG "Node %d CPUs:", node);
count = 0;
/*
@@ -543,7 +543,7 @@ static void __init dump_numa_memory_topology(void)
for_each_online_node(node) {
unsigned long i;
- printk(KERN_INFO "Node %d Memory:", node);
+ printk(KERN_DEBUG "Node %d Memory:", node);
count = 0;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ed7fcfe5fd37..2ed43a493b31 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -42,18 +42,14 @@ unsigned long _SDR1;
union ubat { /* BAT register values to be loaded */
BAT bat;
-#ifdef CONFIG_PPC64BRIDGE
- u64 word[2];
-#else
u32 word[2];
-#endif
-} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
+} BATS[8][2]; /* 8 pairs of IBAT, DBAT */
struct batrange { /* stores address ranges mapped by BATs */
unsigned long start;
unsigned long limit;
unsigned long phys;
-} bat_addrs[4];
+} bat_addrs[8];
/*
* Return PA for this VA if it is mapped by a BAT, or 0
@@ -190,7 +186,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
return;
pmd = pmd_offset(pgd_offset(mm, ea), ea);
if (!pmd_none(*pmd))
- add_hash_page(mm->context, ea, pmd_val(*pmd));
+ add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
/*
@@ -220,15 +216,9 @@ void __init MMU_init_hw(void)
if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
-#ifdef CONFIG_PPC64BRIDGE
-#define LG_HPTEG_SIZE 7 /* 128 bytes per HPTEG */
-#define SDR1_LOW_BITS (lg_n_hpteg - 11)
-#define MIN_N_HPTEG 2048 /* min 256kB hash table */
-#else
#define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */
#define SDR1_LOW_BITS ((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG 1024 /* min 64kB hash table */
-#endif
/*
* Allow 1 HPTE (1/8 HPTEG) for each page of memory.
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ffc8ed4de62d..6a8bf6c6000e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
: "memory" );
}
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* appropriately too. */
- unsigned long linear_llp, virtual_llp, lflags, vflags;
+ unsigned long linear_llp, vmalloc_llp, lflags, vflags;
unsigned long ksp_esid_data;
WARN_ON(!irqs_disabled());
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | virtual_llp;
+ vflags = SLB_VSID_KERNEL | vmalloc_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -122,9 +122,6 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
get_paca()->slb_cache_ptr = 0;
get_paca()->context = mm->context;
-#ifdef CONFIG_PPC_64K_PAGES
- get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
/*
* preload some userspace segments into the SLB.
@@ -167,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
void slb_initialize(void)
{
- unsigned long linear_llp, virtual_llp;
+ unsigned long linear_llp, vmalloc_llp, io_llp;
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
- extern unsigned int *slb_miss_kernel_load_virtual;
- extern unsigned int *slb_miss_user_load_normal;
+ extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int *slb_miss_user_load_huge;
unsigned long huge_llp;
@@ -181,18 +177,19 @@ void slb_initialize(void)
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+ vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
SLB_VSID_KERNEL | linear_llp);
- patch_slb_encoding(slb_miss_kernel_load_virtual,
- SLB_VSID_KERNEL | virtual_llp);
- patch_slb_encoding(slb_miss_user_load_normal,
- SLB_VSID_USER | virtual_llp);
+ patch_slb_encoding(slb_miss_kernel_load_io,
+ SLB_VSID_KERNEL | io_llp);
DBG("SLB: linear LLP = %04x\n", linear_llp);
- DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+ DBG("SLB: io LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
patch_slb_encoding(slb_miss_user_load_huge,
SLB_VSID_USER | huge_llp);
@@ -207,7 +204,7 @@ void slb_initialize(void)
unsigned long lflags, vflags;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | virtual_llp;
+ vflags = SLB_VSID_KERNEL | vmalloc_llp;
/* Invalidate the entire SLB (even slot 0) & all the ERATS */
asm volatile("isync":::"memory");
@@ -215,7 +212,6 @@ void slb_initialize(void)
asm volatile("isync; slbia; isync":::"memory");
create_slbe(PAGE_OFFSET, lflags, 0);
- /* VMALLOC space has 4K pages always for now */
create_slbe(VMALLOC_START, vflags, 1);
/* We don't bolt the stack for the time being - we're in boot,
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index abfaabf667bf..8548dcf8ef8b 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,10 +59,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
li r11,0
b slb_finish_load
-1: /* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
* will be patched by the kernel at boot
*/
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+ /* check whether this is in vmalloc or ioremap space */
+ clrldi r11,r10,48
+ cmpldi r11,(VMALLOC_SIZE >> 28) - 1
+ bgt 5f
+ lhz r11,PACAVMALLOCSLLP(r13)
+ b slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
li r11,0
b slb_finish_load
@@ -96,9 +105,7 @@ _GLOBAL(slb_miss_user_load_huge)
1:
#endif /* CONFIG_HUGETLB_PAGE */
-_GLOBAL(slb_miss_user_load_normal)
- li r11,0
-
+ lhz r11,PACACONTEXTSLLP(r13)
2:
ld r9,PACACONTEXTID(r13)
rldimi r10,r9,USER_ESID_BITS,0
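
With these SLB miss changes the kernel distinguishes three kernel mappings, the linear map, vmalloc and ioremap, each with its own segment encoding (sllp), while user segments now take theirs from the per-context value in the PACA. A rough C model of the classification the assembler performs; the address-layout constants here are placeholders, not the real ppc64 values:

#include <stdint.h>

enum seg { SEG_USER, SEG_LINEAR, SEG_VMALLOC, SEG_IO };

/* Placeholder layout: linear map, then a vmalloc window of
 * VMALLOC_SEGS 256MB segments, then ioremap space above it. */
#define KERNEL_BASE   0xc000000000000000ULL
#define VMALLOC_BASE  0xd000000000000000ULL
#define VMALLOC_SEGS  16ULL                 /* ~ (VMALLOC_SIZE >> 28)  */

static enum seg classify(uint64_t ea)
{
        if (ea < KERNEL_BASE)
                return SEG_USER;            /* sllp from mm context    */
        if (ea < VMALLOC_BASE)
                return SEG_LINEAR;          /* patched linear sllp     */
        if (((ea - VMALLOC_BASE) >> 28) < VMALLOC_SEGS)
                return SEG_VMALLOC;         /* sllp read from the PACA */
        return SEG_IO;                      /* patched io sllp         */
}
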
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 4a9291d9fef8..691320c90b78 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -200,10 +200,6 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
__get_cpu_var(stab_cache_ptr) = 0;
-#ifdef CONFIG_PPC_64K_PAGES
- get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
-
/* Now preload some entries for the new task */
if (test_tsk_thread_flag(tsk, TIF_32BIT))
unmapped_base = TASK_UNMAPPED_BASE_USER32;
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index ad580f3742e5..02eb23e036d5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -42,7 +42,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
if (Hash != 0) {
ptephys = __pa(ptep) & PAGE_MASK;
- flush_hash_pages(mm->context, addr, ptephys, 1);
+ flush_hash_pages(mm->context.id, addr, ptephys, 1);
}
}
@@ -102,7 +102,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
pmd_t *pmd;
unsigned long pmd_end;
int count;
- unsigned int ctx = mm->context;
+ unsigned int ctx = mm->context.id;
if (Hash == 0) {
_tlbia();
@@ -172,7 +172,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
if (!pmd_none(*pmd))
- flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+ flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH;
}
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index f734b11566c2..e7449b068c82 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -131,7 +131,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
{
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
unsigned long vsid;
- unsigned int psize = mmu_virtual_psize;
+ unsigned int psize;
int i;
i = batch->index;
@@ -148,7 +148,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
#else
BUG();
#endif
- }
+ } else
+ psize = pte_pagesize_index(pte);
/*
* This can happen when we are in the middle of a TLB batch and
diff --git a/arch/powerpc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
index d03c0e5ca870..eb2dece76a54 100644
--- a/arch/powerpc/oprofile/Kconfig
+++ b/arch/powerpc/oprofile/Kconfig
@@ -1,5 +1,4 @@
config PROFILING
- depends on !PPC_ISERIES
bool "Profiling support (EXPERIMENTAL)"
help
Say Y here to enable the extended profiling support mechanisms used
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index f5f9859a8338..3145d610b5b0 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -1,3 +1,7 @@
+ifeq ($(CONFIG_PPC64),y)
+EXTRA_CFLAGS += -mno-minimal-toc
+endif
+
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 5b1de7e8041e..27ad56bd227e 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -22,6 +22,7 @@
#include <asm/pmc.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
+#include <asm/firmware.h>
static struct op_powerpc_model *model;
@@ -130,6 +131,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
if (!cur_cpu_spec->oprofile_cpu_type)
return -ENODEV;
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ return -ENODEV;
+
switch (cur_cpu_spec->oprofile_type) {
#ifdef CONFIG_PPC64
case PPC_OPROFILE_RS64:
@@ -162,7 +166,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->stop = op_powerpc_stop;
ops->backtrace = op_powerpc_backtrace;
- printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
+ printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n",
ops->cpu_type);
return 0;
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 4c2beab1fdc1..506f6b79f893 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -24,10 +24,6 @@
static unsigned long reset_value[OP_MAX_COUNTER];
static int oprofile_running;
-static int mmcra_has_sihv;
-/* Unfortunately these bits vary between CPUs */
-static unsigned long mmcra_sihv = MMCRA_SIHV;
-static unsigned long mmcra_sipr = MMCRA_SIPR;
/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
@@ -41,16 +37,6 @@ static void power4_reg_setup(struct op_counter_config *ctr,
int i;
/*
- * SIHV / SIPR bits are only implemented on POWER4+ (GQ) and above.
- * However we disable it on all POWER4 until we verify it works
- * (I was seeing some strange behaviour last time I tried).
- *
- * It has been verified to work on POWER5 so we enable it there.
- */
- if (cpu_has_feature(CPU_FTR_MMCRA_SIHV))
- mmcra_has_sihv = 1;
-
- /*
* The performance counter event settings are given in the mmcr0,
* mmcr1 and mmcra values passed from the user in the
* op_system_config structure (sys variable).
@@ -202,18 +188,19 @@ static unsigned long get_pc(struct pt_regs *regs)
unsigned long mmcra;
/* Can't do much about it */
- if (!mmcra_has_sihv)
+ if (!cur_cpu_spec->oprofile_mmcra_sihv)
return pc;
mmcra = mfspr(SPRN_MMCRA);
/* Were we in the hypervisor? */
- if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & mmcra_sihv))
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
/* function descriptor madness */
return *((unsigned long *)hypervisor_bucket);
/* We were in userspace, nothing to do */
- if (mmcra & mmcra_sipr)
+ if (mmcra & cur_cpu_spec->oprofile_mmcra_sipr)
return pc;
#ifdef CONFIG_PPC_RTAS
@@ -235,15 +222,14 @@ static unsigned long get_pc(struct pt_regs *regs)
return pc;
}
-static int get_kernel(unsigned long pc)
+static int get_kernel(unsigned long pc, unsigned long mmcra)
{
int is_kernel;
- if (!mmcra_has_sihv) {
+ if (!cur_cpu_spec->oprofile_mmcra_sihv) {
is_kernel = is_kernel_addr(pc);
} else {
- unsigned long mmcra = mfspr(SPRN_MMCRA);
- is_kernel = ((mmcra & mmcra_sipr) == 0);
+ is_kernel = ((mmcra & cur_cpu_spec->oprofile_mmcra_sipr) == 0);
}
return is_kernel;
@@ -257,9 +243,12 @@ static void power4_handle_interrupt(struct pt_regs *regs,
int val;
int i;
unsigned int mmcr0;
+ unsigned long mmcra;
+
+ mmcra = mfspr(SPRN_MMCRA);
pc = get_pc(regs);
- is_kernel = get_kernel(pc);
+ is_kernel = get_kernel(pc, mmcra);
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
@@ -287,6 +276,10 @@ static void power4_handle_interrupt(struct pt_regs *regs,
*/
mmcr0 &= ~MMCR0_PMAO;
+ /* Clear the appropriate bits in the MMCRA */
+ mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear;
+ mtspr(SPRN_MMCRA, mmcra);
+
/*
* now clear the freeze bit, counting will not start until we
* rfid from this exception, because only at that point will
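
With MMCRA read once in the interrupt handler and the SIPR/SIHV masks coming from the CPU spec, attributing a sample to user, kernel or hypervisor context reduces to a couple of bit tests, falling back to the PC when the CPU has no SIHV at all. A hedged sketch of that classification, with made-up bit values:

#include <stdbool.h>
#include <stdint.h>

enum sample_ctx { CTX_USER, CTX_KERNEL, CTX_HYPERVISOR };

/* The real masks vary per CPU family; these are placeholders. */
#define SIPR_BIT 0x1UL   /* sample taken in problem (user) state */
#define SIHV_BIT 0x2UL   /* sample taken in hypervisor state     */

static enum sample_ctx classify_sample(uint64_t mmcra, bool have_sihv,
                                       bool in_lpar, bool pc_is_kernel)
{
        if (!have_sihv)                 /* no MMCRA help: trust the PC */
                return pc_is_kernel ? CTX_KERNEL : CTX_USER;
        if (in_lpar && (mmcra & SIHV_BIT))
                return CTX_HYPERVISOR;
        return (mmcra & SIPR_BIT) ? CTX_USER : CTX_KERNEL;
}
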
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 06e371282f57..454fc53289ab 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -11,13 +11,20 @@ config MPC8540_ADS
help
This option enables support for the MPC 8540 ADS board
+config MPC85xx_CDS
+ bool "Freescale MPC85xx CDS"
+ select DEFAULT_UIMAGE
+ select PPC_I8259 if PCI
+ help
+ This option enables support for the MPC85xx CDS board
+
endchoice
config MPC8540
bool
select PPC_UDBG_16550
select PPC_INDIRECT_PCI
- default y if MPC8540_ADS
+ default y if MPC8540_ADS || MPC85xx_CDS
config PPC_INDIRECT_PCI_BE
bool
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index ffc4139cb214..7615aa59c78b 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_PPC_85xx) += misc.o pci.o
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
+obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
new file mode 100644
index 000000000000..18e6e11f7020
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -0,0 +1,359 @@
+/*
+ * MPC85xx setup and early boot code plus other random bits.
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * Copyright 2005 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/module.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpc85xx.h>
+#include <asm/irq.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/i8259.h>
+
+#include <sysdev/fsl_soc.h>
+#include "mpc85xx.h"
+
+#ifndef CONFIG_PCI
+unsigned long isa_io_base = 0;
+unsigned long isa_mem_base = 0;
+#endif
+
+static int cds_pci_slot = 2;
+static volatile u8 *cadmus;
+
+/*
+ * Internal interrupts are all Level Sensitive, and Positive Polarity
+ *
+ * Note: Likely, this table and the following function should be
+ * obtained and derived from the OF Device Tree.
+ */
+static u_char mpc85xx_cds_openpic_initsenses[] __initdata = {
+ MPC85XX_INTERNAL_IRQ_SENSES,
+#if defined(CONFIG_PCI)
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Ext 0: PCI slot 0 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 1: PCI slot 1 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 2: PCI slot 2 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 3: PCI slot 3 */
+#else
+ 0x0, /* External 0: */
+ 0x0, /* External 1: */
+ 0x0, /* External 2: */
+ 0x0, /* External 3: */
+#endif
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
+ 0x0, /* External 6: */
+ 0x0, /* External 7: */
+ 0x0, /* External 8: */
+ 0x0, /* External 9: */
+ 0x0, /* External 10: */
+#ifdef CONFIG_PCI
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 11: PCI2 slot 0 */
+#else
+ 0x0, /* External 11: */
+#endif
+};
+
+
+#ifdef CONFIG_PCI
+/*
+ * interrupt routing
+ */
+int
+mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+ struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
+
+ if (!hose->index)
+ {
+ /* Handle PCI1 interrupts */
+ char pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+
+ /* Note: IRQ assignment for the slots depends on which slot the elysium is
+ * in -- in this setup the elysium is in slot #2, so PIRQA is the first
+ * interrupt on that slot. */
+ {
+ { 0, 1, 2, 3 }, /* 16 - PMC */
+ { 0, 1, 2, 3 }, /* 17 P2P (Tsi320) */
+ { 0, 1, 2, 3 }, /* 18 - Slot 1 */
+ { 1, 2, 3, 0 }, /* 19 - Slot 2 */
+ { 2, 3, 0, 1 }, /* 20 - Slot 3 */
+ { 3, 0, 1, 2 }, /* 21 - Slot 4 */
+ };
+
+ const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4;
+ int i, j;
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++)
+ pci_irq_table[i][j] =
+ ((pci_irq_table[i][j] + 5 -
+ cds_pci_slot) & 0x3) + PIRQ0A;
+
+ return PCI_IRQ_TABLE_LOOKUP;
+ } else {
+ /* Handle PCI2 interrupts (if we have one) */
+ char pci_irq_table[][4] =
+ {
+ /*
+ * We only have one slot and one interrupt
+ * going to PIRQA - PIRQD */
+ { PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */
+ };
+
+ const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4;
+
+ return PCI_IRQ_TABLE_LOOKUP;
+ }
+}
+
+#define ARCADIA_HOST_BRIDGE_IDSEL 17
+#define ARCADIA_2ND_BRIDGE_IDSEL 3
+
+extern int mpc85xx_pci2_busno;
+
+int
+mpc85xx_exclude_device(u_char bus, u_char devfn)
+{
+ if (bus == 0 && PCI_SLOT(devfn) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (mpc85xx_pci2_busno)
+ if (bus == (mpc85xx_pci2_busno) && PCI_SLOT(devfn) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /* We explicitly do not go past the Tundra 320 Bridge */
+ if ((bus == 1) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if ((bus == 0) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return PCIBIOS_SUCCESSFUL;
+}
+
+void __init
+mpc85xx_cds_pcibios_fixup(void)
+{
+ struct pci_dev *dev;
+ u_char c;
+
+ if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_82C586_1, NULL))) {
+ /*
+ * U-Boot does not set the enable bits
+ * for the IDE device. Force them on here.
+ */
+ pci_read_config_byte(dev, 0x40, &c);
+ c |= 0x03; /* IDE: Chip Enable Bits */
+ pci_write_config_byte(dev, 0x40, c);
+
+ /*
+ * Since only primary interface works, force the
+ * IDE function to standard primary IDE interrupt
+ * w/ 8259 offset
+ */
+ dev->irq = 14;
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ pci_dev_put(dev);
+ }
+
+ /*
+ * Force legacy USB interrupt routing
+ */
+ if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_82C586_2, NULL))) {
+ dev->irq = 10;
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 10);
+ pci_dev_put(dev);
+ }
+
+ if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_82C586_2, dev))) {
+ dev->irq = 11;
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
+ pci_dev_put(dev);
+ }
+}
+#endif /* CONFIG_PCI */
+
+void __init mpc85xx_cds_pic_init(void)
+{
+ struct mpic *mpic1;
+ phys_addr_t OpenPIC_PAddr;
+
+ /* Determine the Physical Address of the OpenPIC regs */
+ OpenPIC_PAddr = get_immrbase() + MPC85xx_OPENPIC_OFFSET;
+
+ mpic1 = mpic_alloc(OpenPIC_PAddr,
+ MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ 4, MPC85xx_OPENPIC_IRQ_OFFSET, 0, 250,
+ mpc85xx_cds_openpic_initsenses,
+ sizeof(mpc85xx_cds_openpic_initsenses), " OpenPIC ");
+ BUG_ON(mpic1 == NULL);
+ mpic_assign_isu(mpic1, 0, OpenPIC_PAddr + 0x10200);
+ mpic_assign_isu(mpic1, 1, OpenPIC_PAddr + 0x10280);
+ mpic_assign_isu(mpic1, 2, OpenPIC_PAddr + 0x10300);
+ mpic_assign_isu(mpic1, 3, OpenPIC_PAddr + 0x10380);
+ mpic_assign_isu(mpic1, 4, OpenPIC_PAddr + 0x10400);
+ mpic_assign_isu(mpic1, 5, OpenPIC_PAddr + 0x10480);
+ mpic_assign_isu(mpic1, 6, OpenPIC_PAddr + 0x10500);
+ mpic_assign_isu(mpic1, 7, OpenPIC_PAddr + 0x10580);
+
+ /* dummy mappings to get to 48 */
+ mpic_assign_isu(mpic1, 8, OpenPIC_PAddr + 0x10600);
+ mpic_assign_isu(mpic1, 9, OpenPIC_PAddr + 0x10680);
+ mpic_assign_isu(mpic1, 10, OpenPIC_PAddr + 0x10700);
+ mpic_assign_isu(mpic1, 11, OpenPIC_PAddr + 0x10780);
+
+ /* External ints */
+ mpic_assign_isu(mpic1, 12, OpenPIC_PAddr + 0x10000);
+ mpic_assign_isu(mpic1, 13, OpenPIC_PAddr + 0x10080);
+ mpic_assign_isu(mpic1, 14, OpenPIC_PAddr + 0x10100);
+
+ mpic_init(mpic1);
+
+#ifdef CONFIG_PCI
+ mpic_setup_cascade(PIRQ0A, i8259_irq_cascade, NULL);
+
+ i8259_init(0,0);
+#endif
+}
+
+
+/*
+ * Setup the architecture
+ */
+static void __init
+mpc85xx_cds_setup_arch(void)
+{
+ struct device_node *cpu;
+#ifdef CONFIG_PCI
+ struct device_node *np;
+#endif
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc85xx_cds_setup_arch()", 0);
+
+ cpu = of_find_node_by_type(NULL, "cpu");
+ if (cpu != 0) {
+ unsigned int *fp;
+
+ fp = (int *)get_property(cpu, "clock-frequency", NULL);
+ if (fp != 0)
+ loops_per_jiffy = *fp / HZ;
+ else
+ loops_per_jiffy = 500000000 / HZ;
+ of_node_put(cpu);
+ }
+
+ cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE);
+ cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1;
+
+ if (ppc_md.progress) {
+ char buf[40];
+ snprintf(buf, 40, "CDS Version = 0x%x in slot %d\n",
+ cadmus[CM_VER], cds_pci_slot);
+ ppc_md.progress(buf, 0);
+ }
+
+#ifdef CONFIG_PCI
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ add_bridge(np);
+
+ ppc_md.pcibios_fixup = mpc85xx_cds_pcibios_fixup;
+ ppc_md.pci_swizzle = common_swizzle;
+ ppc_md.pci_map_irq = mpc85xx_map_irq;
+ ppc_md.pci_exclude_device = mpc85xx_exclude_device;
+#endif
+
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+}
+
+
+void
+mpc85xx_cds_show_cpuinfo(struct seq_file *m)
+{
+ uint pvid, svid, phid1;
+ uint memsize = total_memory;
+
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
+
+ seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
+ seq_printf(m, "Machine\t\t: MPC85xx CDS (0x%x)\n", cadmus[CM_VER]);
+ seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+ seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+
+ /* Display cpu Pll setting */
+ phid1 = mfspr(SPRN_HID1);
+ seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+
+ /* Display the amount of memory */
+ seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+}
+
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mpc85xx_cds_probe(void)
+{
+ /* We always match for now; eventually we should look at
+ * the flat dev tree to ensure this is the board we are
+ * supposed to run on.
+ */
+ return 1;
+}
+
+define_machine(mpc85xx_cds) {
+ .name = "MPC85xx CDS",
+ .probe = mpc85xx_cds_probe,
+ .setup_arch = mpc85xx_cds_setup_arch,
+ .init_IRQ = mpc85xx_cds_pic_init,
+ .show_cpuinfo = mpc85xx_cds_show_cpuinfo,
+ .get_irq = mpic_get_irq,
+ .restart = mpc85xx_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
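
The PCI1 routing in mpc85xx_map_irq() is worth a second look: the table is written as if the elysium card sat in slot 2, and the "((x + 5 - cds_pci_slot) & 0x3) + PIRQ0A" expression rotates every entry to match the slot actually detected from the CADMUS register. A tiny standalone sketch of that rotation (PIRQ_BASE is a placeholder for PIRQ0A):

#include <stdio.h>

#define PIRQ_BASE 48    /* placeholder for PIRQ0A */

/* Rotate an INTA..INTD routing value by the detected slot number,
 * using the same "(x + 5 - slot) & 3" trick as mpc85xx_map_irq(). */
static int route_pirq(int table_val, int cds_pci_slot)
{
        return ((table_val + 5 - cds_pci_slot) & 0x3) + PIRQ_BASE;
}

int main(void)
{
        int pin;

        /* Print the rotated routing of one table row for a board in slot 2. */
        for (pin = 0; pin < 4; pin++)
                printf("pin %d -> PIRQ%c\n", pin,
                       'A' + route_pirq(pin, 2) - PIRQ_BASE);
        return 0;
}
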
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.h b/arch/powerpc/platforms/85xx/mpc85xx_cds.h
new file mode 100644
index 000000000000..671f54ff185a
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.h
@@ -0,0 +1,43 @@
+/*
+ * arch/ppc/platforms/85xx/mpc85xx_cds_common.h
+ *
+ * MPC85xx CDS board definitions
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MACH_MPC85XX_CDS_H__
+#define __MACH_MPC85XX_CDS_H__
+
+/* CADMUS info */
+#define CADMUS_BASE (0xf8004000)
+#define CADMUS_SIZE (256)
+#define CM_VER (0)
+#define CM_CSR (1)
+#define CM_RST (2)
+
+/* CDS NVRAM/RTC */
+#define CDS_RTC_ADDR (0xf8000000)
+#define CDS_RTC_SIZE (8 * 1024)
+
+/* PCI interrupt controller */
+#define PIRQ0A MPC85xx_IRQ_EXT0
+#define PIRQ0B MPC85xx_IRQ_EXT1
+#define PIRQ0C MPC85xx_IRQ_EXT2
+#define PIRQ0D MPC85xx_IRQ_EXT3
+#define PIRQ1A MPC85xx_IRQ_EXT11
+
+#define NR_8259_INTS 16
+#define CPM_IRQ_OFFSET NR_8259_INTS
+
+#define MPC85xx_OPENPIC_IRQ_OFFSET 80
+
+#endif /* __MACH_MPC85XX_CDS_H__ */
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
new file mode 100644
index 000000000000..3a87863d2876
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -0,0 +1,36 @@
+menu "Platform Support"
+ depends on PPC_86xx
+
+choice
+ prompt "Machine Type"
+ default MPC8641_HPCN
+
+config MPC8641_HPCN
+ bool "Freescale MPC8641 HPCN"
+ help
+ This option enables support for the MPC8641 HPCN board.
+
+endchoice
+
+
+config MPC8641
+ bool
+ select PPC_INDIRECT_PCI
+ select PPC_UDBG_16550
+ default y if MPC8641_HPCN
+
+config MPIC
+ bool
+ default y
+
+config PPC_INDIRECT_PCI_BE
+ bool
+ depends on PPC_86xx
+ default y
+
+config PPC_STD_MMU
+ bool
+ depends on PPC_86xx
+ default y
+
+endmenu
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
new file mode 100644
index 000000000000..7be796c5d5c9
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the PowerPC 86xx linux kernel.
+#
+
+
+ifeq ($(CONFIG_PPC_86xx),y)
+obj-$(CONFIG_SMP) += mpc86xx_smp.o
+endif
+obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
+obj-$(CONFIG_PCI) += pci.o mpc86xx_pcie.o
diff --git a/arch/powerpc/platforms/86xx/mpc8641_hpcn.h b/arch/powerpc/platforms/86xx/mpc8641_hpcn.h
new file mode 100644
index 000000000000..5042253758b7
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/mpc8641_hpcn.h
@@ -0,0 +1,54 @@
+/*
+ * MPC8641 HPCN board definitions
+ *
+ * Copyright 2006 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Author: Xianghua Xiao <x.xiao@freescale.com>
+ */
+
+#ifndef __MPC8641_HPCN_H__
+#define __MPC8641_HPCN_H__
+
+#include <linux/config.h>
+#include <linux/init.h>
+
+/* PCI interrupt controller */
+#define PIRQA 3
+#define PIRQB 4
+#define PIRQC 5
+#define PIRQD 6
+#define PIRQ7 7
+#define PIRQE 9
+#define PIRQF 10
+#define PIRQG 11
+#define PIRQH 12
+
+/* PCI-Express memory map */
+#define MPC86XX_PCIE_LOWER_IO 0x00000000
+#define MPC86XX_PCIE_UPPER_IO 0x00ffffff
+
+#define MPC86XX_PCIE_LOWER_MEM 0x80000000
+#define MPC86XX_PCIE_UPPER_MEM 0x9fffffff
+
+#define MPC86XX_PCIE_IO_BASE 0xe2000000
+#define MPC86XX_PCIE_MEM_OFFSET 0x00000000
+
+#define MPC86XX_PCIE_IO_SIZE 0x01000000
+
+#define PCIE1_CFG_ADDR_OFFSET (0x8000)
+#define PCIE1_CFG_DATA_OFFSET (0x8004)
+
+#define PCIE2_CFG_ADDR_OFFSET (0x9000)
+#define PCIE2_CFG_DATA_OFFSET (0x9004)
+
+#define MPC86xx_PCIE_OFFSET PCIE1_CFG_ADDR_OFFSET
+#define MPC86xx_PCIE_SIZE (0x1000)
+
+#define MPC86XX_RSTCR_OFFSET (0xe00b0) /* Reset Control Register */
+
+#endif /* __MPC8641_HPCN_H__ */
diff --git a/arch/powerpc/platforms/86xx/mpc86xx.h b/arch/powerpc/platforms/86xx/mpc86xx.h
new file mode 100644
index 000000000000..e3c9e4f417d3
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/mpc86xx.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2006 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MPC86XX_H__
+#define __MPC86XX_H__
+
+/*
+ * Declaration for the various functions exported by the
+ * mpc86xx_* files. Mostly for use by mpc86xx_setup().
+ */
+
+extern int __init add_bridge(struct device_node *dev);
+
+extern void __init setup_indirect_pcie(struct pci_controller *hose,
+ u32 cfg_addr, u32 cfg_data);
+extern void __init setup_indirect_pcie_nomap(struct pci_controller *hose,
+ void __iomem *cfg_addr,
+ void __iomem *cfg_data);
+
+extern void __init mpc86xx_smp_init(void);
+
+#endif /* __MPC86XX_H__ */
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
new file mode 100644
index 000000000000..483c21df181e
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -0,0 +1,326 @@
+/*
+ * MPC86xx HPCN board specific routines
+ *
+ * Recode: ZHANG WEI <wei.zhang@freescale.com>
+ * Initial author: Xianghua Xiao <x.xiao@freescale.com>
+ *
+ * Copyright 2006 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpc86xx.h>
+#include <asm/prom.h>
+#include <mm/mmu_decl.h>
+#include <asm/udbg.h>
+#include <asm/i8259.h>
+
+#include <asm/mpic.h>
+
+#include <sysdev/fsl_soc.h>
+
+#include "mpc86xx.h"
+
+#ifndef CONFIG_PCI
+unsigned long isa_io_base = 0;
+unsigned long isa_mem_base = 0;
+unsigned long pci_dram_offset = 0;
+#endif
+
+
+/*
+ * Internal interrupts are all Level Sensitive, and Positive Polarity
+ */
+
+static u_char mpc86xx_hpcn_openpic_initsenses[] __initdata = {
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: Reserved */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: MCM */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCIE1 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: PCIE2 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: Reserved */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: Reserved */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: DUART2 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 1 Transmit */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 1 Receive */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: TSEC 3 transmit */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: TSEC 3 receive */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: TSEC 3 error */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 1 Receive/Transmit Error */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 2 Transmit */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 2 Receive */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: TSEC 4 transmit */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: TSEC 4 receive */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: TSEC 4 error */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 2 Receive/Transmit Error */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART1 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 32: SRIO error/write-port unit */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 33: SRIO outbound doorbell */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 34: SRIO inbound doorbell */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 35: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 36: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 37: SRIO outbound message unit 1 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 38: SRIO inbound message unit 1 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 39: SRIO outbound message unit 2 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 40: SRIO inbound message unit 2 */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 41: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 42: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 43: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 44: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 45: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 46: Unused */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 47: Unused */
+ 0x0, /* External 0: */
+ 0x0, /* External 1: */
+ 0x0, /* External 2: */
+ 0x0, /* External 3: */
+ 0x0, /* External 4: */
+ 0x0, /* External 5: */
+ 0x0, /* External 6: */
+ 0x0, /* External 7: */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 8: Pixis FPGA */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 9: ULI 8259 INTR Cascade */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 10: Quad ETH PHY */
+ 0x0, /* External 11: */
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+};
+
+
+void __init
+mpc86xx_hpcn_init_irq(void)
+{
+ struct mpic *mpic1;
+ phys_addr_t openpic_paddr;
+
+ /* Determine the Physical Address of the OpenPIC regs */
+ openpic_paddr = get_immrbase() + MPC86xx_OPENPIC_OFFSET;
+
+ /* Allocate the mpic structure; each ISU has 16 interrupt entries. */
+ mpic1 = mpic_alloc(openpic_paddr,
+ MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ 16, MPC86xx_OPENPIC_IRQ_OFFSET, 0, 250,
+ mpc86xx_hpcn_openpic_initsenses,
+ sizeof(mpc86xx_hpcn_openpic_initsenses),
+ " MPIC ");
+ BUG_ON(mpic1 == NULL);
+
+ /* 48 Internal Interrupts */
+ mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10200);
+ mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10400);
+ mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10600);
+
+ /* 16 External interrupts */
+ mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10000);
+
+ mpic_init(mpic1);
+
+#ifdef CONFIG_PCI
+ mpic_setup_cascade(MPC86xx_IRQ_EXT9, i8259_irq_cascade, NULL);
+ i8259_init(0, I8259_OFFSET);
+#endif
+}
+
+
+
+#ifdef CONFIG_PCI
+/*
+ * interrupt routing
+ */
+
+int
+mpc86xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+ static char pci_irq_table[][4] = {
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 17 -- PCI Slot 1 */
+ {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 18 -- PCI Slot 2 */
+ {0, 0, 0, 0}, /* IDSEL 19 */
+ {0, 0, 0, 0}, /* IDSEL 20 */
+ {0, 0, 0, 0}, /* IDSEL 21 */
+ {0, 0, 0, 0}, /* IDSEL 22 */
+ {0, 0, 0, 0}, /* IDSEL 23 */
+ {0, 0, 0, 0}, /* IDSEL 24 */
+ {0, 0, 0, 0}, /* IDSEL 25 */
+ {PIRQD, PIRQA, PIRQB, PIRQC}, /* IDSEL 26 -- PCI Bridge*/
+ {PIRQC, 0, 0, 0}, /* IDSEL 27 -- LAN */
+ {PIRQE, PIRQF, PIRQH, PIRQ7}, /* IDSEL 28 -- USB 1.1 */
+ {PIRQE, PIRQF, PIRQG, 0}, /* IDSEL 29 -- Audio & Modem */
+ {PIRQH, 0, 0, 0}, /* IDSEL 30 -- LPC & PMU*/
+ {PIRQD, 0, 0, 0}, /* IDSEL 31 -- ATA */
+ };
+
+ const long min_idsel = 17, max_idsel = 31, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP + I8259_OFFSET;
+}
+
+
+int
+mpc86xx_exclude_device(u_char bus, u_char devfn)
+{
+#if !defined(CONFIG_PCI)
+ if (bus == 0 && PCI_SLOT(devfn) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+#endif
+
+ return PCIBIOS_SUCCESSFUL;
+}
+#endif /* CONFIG_PCI */
+
+
+static void __init
+mpc86xx_hpcn_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0);
+
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np != 0) {
+ unsigned int *fp;
+
+ fp = (int *)get_property(np, "clock-frequency", NULL);
+ if (fp != 0)
+ loops_per_jiffy = *fp / HZ;
+ else
+ loops_per_jiffy = 50000000 / HZ;
+ of_node_put(np);
+ }
+
+#ifdef CONFIG_PCI
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ add_bridge(np);
+
+ ppc_md.pci_swizzle = common_swizzle;
+ ppc_md.pci_map_irq = mpc86xx_map_irq;
+ ppc_md.pci_exclude_device = mpc86xx_exclude_device;
+#endif
+
+ printk("MPC86xx HPCN board from Freescale Semiconductor\n");
+
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+
+#ifdef CONFIG_SMP
+ mpc86xx_smp_init();
+#endif
+}
+
+
+void
+mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ uint memsize = total_memory;
+ const char *model = "";
+ uint svid = mfspr(SPRN_SVR);
+
+ seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
+
+ root = of_find_node_by_path("/");
+ if (root)
+ model = get_property(root, "model", NULL);
+ seq_printf(m, "Machine\t\t: %s\n", model);
+ of_node_put(root);
+
+ seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+ seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+}
+
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mpc86xx_hpcn_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "mpc86xx"))
+ return 1; /* Looks good */
+
+ return 0;
+}
+
+
+void
+mpc86xx_restart(char *cmd)
+{
+ void __iomem *rstcr;
+
+ rstcr = ioremap(get_immrbase() + MPC86XX_RSTCR_OFFSET, 0x100);
+
+ local_irq_disable();
+
+ /* Assert reset request to Reset Control Register */
+ out_be32(rstcr, 0x2);
+
+ /* not reached */
+}
+
+
+long __init
+mpc86xx_time_init(void)
+{
+ unsigned int temp;
+
+ /* Set the time base to zero */
+ mtspr(SPRN_TBWL, 0);
+ mtspr(SPRN_TBWU, 0);
+
+ temp = mfspr(SPRN_HID0);
+ temp |= HID0_TBEN;
+ mtspr(SPRN_HID0, temp);
+ asm volatile("isync");
+
+ return 0;
+}
+
+
+define_machine(mpc86xx_hpcn) {
+ .name = "MPC86xx HPCN",
+ .probe = mpc86xx_hpcn_probe,
+ .setup_arch = mpc86xx_hpcn_setup_arch,
+ .init_IRQ = mpc86xx_hpcn_init_irq,
+ .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo,
+ .get_irq = mpic_get_irq,
+ .restart = mpc86xx_restart,
+ .time_init = mpc86xx_time_init,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_pcie.c b/arch/powerpc/platforms/86xx/mpc86xx_pcie.c
new file mode 100644
index 000000000000..a2f4f730213e
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/mpc86xx_pcie.c
@@ -0,0 +1,173 @@
+/*
+ * Support for indirect PCI bridges.
+ *
+ * Copyright (C) 1998 Gabriel Paubert.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * "Temporary" MPC8548 Errata file -
+ * The standard indirect_pci code should work with future silicon versions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+
+#include "mpc86xx.h"
+
+#define PCI_CFG_OUT out_be32
+
+/* ERRATA PCI-Ex 14 PCIE Controller timeout */
+#define PCIE_FIX out_be32(hose->cfg_addr+0x4, 0x0400ffff)
+
+
+static int
+indirect_read_config_pcie(struct pci_bus *bus, unsigned int devfn, int offset,
+ int len, u32 *val)
+{
+ struct pci_controller *hose = bus->sysdata;
+ volatile void __iomem *cfg_data;
+ u32 temp;
+
+ if (ppc_md.pci_exclude_device)
+ if (ppc_md.pci_exclude_device(bus->number, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Possible artifact of CDCpp50937 needs further investigation */
+ if (devfn != 0x0 && bus->number == 0xff)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ PCIE_FIX;
+ if (bus->number == 0xff) {
+ PCI_CFG_OUT(hose->cfg_addr,
+ (0x80000000 | ((offset & 0xf00) << 16) |
+ ((bus->number - hose->bus_offset) << 16)
+ | (devfn << 8) | ((offset & 0xfc) )));
+ } else {
+ PCI_CFG_OUT(hose->cfg_addr,
+ (0x80000001 | ((offset & 0xf00) << 16) |
+ ((bus->number - hose->bus_offset) << 16)
+ | (devfn << 8) | ((offset & 0xfc) )));
+ }
+
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ /* ERRATA PCI-Ex 12 - Configuration Address/Data Alignment */
+ cfg_data = hose->cfg_data;
+ PCIE_FIX;
+ temp = in_le32(cfg_data);
+ switch (len) {
+ case 1:
+ *val = (temp >> (((offset & 3))*8)) & 0xff;
+ break;
+ case 2:
+ *val = (temp >> (((offset & 3))*8)) & 0xffff;
+ break;
+ default:
+ *val = temp;
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_write_config_pcie(struct pci_bus *bus, unsigned int devfn, int offset,
+ int len, u32 val)
+{
+ struct pci_controller *hose = bus->sysdata;
+ volatile void __iomem *cfg_data;
+ u32 temp;
+
+ if (ppc_md.pci_exclude_device)
+ if (ppc_md.pci_exclude_device(bus->number, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Possible artifact of CDCpp50937 needs further investigation */
+ if (devfn != 0x0 && bus->number == 0xff)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ PCIE_FIX;
+ if (bus->number == 0xff) {
+ PCI_CFG_OUT(hose->cfg_addr,
+ (0x80000000 | ((offset & 0xf00) << 16) |
+ ((bus->number - hose->bus_offset) << 16)
+ | (devfn << 8) | ((offset & 0xfc) )));
+ } else {
+ PCI_CFG_OUT(hose->cfg_addr,
+ (0x80000001 | ((offset & 0xf00) << 16) |
+ ((bus->number - hose->bus_offset) << 16)
+ | (devfn << 8) | ((offset & 0xfc) )));
+ }
+
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ /* ERRATA PCI-Ex 12 - Configuration Address/Data Alignment */
+ cfg_data = hose->cfg_data;
+ switch (len) {
+ case 1:
+ PCIE_FIX;
+ temp = in_le32(cfg_data);
+ temp = (temp & ~(0xff << ((offset & 3) * 8))) |
+ (val << ((offset & 3) * 8));
+ PCIE_FIX;
+ out_le32(cfg_data, temp);
+ break;
+ case 2:
+ PCIE_FIX;
+ temp = in_le32(cfg_data);
+ temp = (temp & ~(0xffff << ((offset & 3) * 8)));
+ temp |= (val << ((offset & 3) * 8)) ;
+ PCIE_FIX;
+ out_le32(cfg_data, temp);
+ break;
+ default:
+ PCIE_FIX;
+ out_le32(cfg_data, val);
+ break;
+ }
+ PCIE_FIX;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops indirect_pcie_ops = {
+ indirect_read_config_pcie,
+ indirect_write_config_pcie
+};
+
+void __init
+setup_indirect_pcie_nomap(struct pci_controller* hose, void __iomem * cfg_addr,
+ void __iomem * cfg_data)
+{
+ hose->cfg_addr = cfg_addr;
+ hose->cfg_data = cfg_data;
+ hose->ops = &indirect_pcie_ops;
+}
+
+void __init
+setup_indirect_pcie(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data)
+{
+ unsigned long base = cfg_addr & PAGE_MASK;
+ void __iomem *mbase, *addr, *data;
+
+ mbase = ioremap(base, PAGE_SIZE);
+ addr = mbase + (cfg_addr & ~PAGE_MASK);
+ if ((cfg_data & PAGE_MASK) != base)
+ mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
+ data = mbase + (cfg_data & ~PAGE_MASK);
+ setup_indirect_pcie_nomap(hose, addr, data);
+}
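
Because of the alignment erratum, indirect_write_config_pcie() has to emulate byte and half-word config writes with a 32-bit read-modify-write. The merge itself is ordinary bit surgery; a self-contained sketch of it (the little-endian register image matches the in_le32/out_le32 pair used above):

#include <stdint.h>

/* Merge a 1-, 2- or 4-byte value into the 32-bit register image 'old'
 * at byte offset (offset & 3), mirroring the switch statement in
 * indirect_write_config_pcie(). */
static uint32_t merge_cfg_write(uint32_t old, int offset, int len,
                                uint32_t val)
{
        int shift = (offset & 3) * 8;

        switch (len) {
        case 1:
                return (old & ~(0xffu << shift)) | ((val & 0xffu) << shift);
        case 2:
                return (old & ~(0xffffu << shift)) | ((val & 0xffffu) << shift);
        default:
                return val;             /* full 32-bit write */
        }
}
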
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
new file mode 100644
index 000000000000..944ec4b71416
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -0,0 +1,117 @@
+/*
+ * Author: Xianghua Xiao <x.xiao@freescale.com>
+ * Zhang Wei <wei.zhang@freescale.com>
+ *
+ * Copyright 2006 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/pci-bridge.h>
+#include <asm-powerpc/mpic.h>
+#include <asm/mpc86xx.h>
+#include <asm/cacheflush.h>
+
+#include <sysdev/fsl_soc.h>
+
+#include "mpc86xx.h"
+
+extern void __secondary_start_mpc86xx(void);
+extern unsigned long __secondary_hold_acknowledge;
+
+
+static void __init
+smp_86xx_release_core(int nr)
+{
+ void *mcm_vaddr;
+ unsigned long vaddr, pcr;
+
+ if (nr < 0 || nr >= NR_CPUS)
+ return;
+
+ /*
+ * Startup Core #nr.
+ */
+ mcm_vaddr = ioremap(get_immrbase() + MPC86xx_MCM_OFFSET,
+ MPC86xx_MCM_SIZE);
+ vaddr = (unsigned long)mcm_vaddr + MCM_PORT_CONFIG_OFFSET;
+ pcr = in_be32((volatile unsigned *)vaddr);
+ pcr |= 1 << (nr + 24);
+ out_be32((volatile unsigned *)vaddr, pcr);
+}
+
+
+static void __init
+smp_86xx_kick_cpu(int nr)
+{
+ unsigned int save_vector;
+ unsigned long target, flags;
+ int n = 0;
+ volatile unsigned int *vector
+ = (volatile unsigned int *)(KERNELBASE + 0x100);
+
+ if (nr < 0 || nr >= NR_CPUS)
+ return;
+
+ pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr);
+
+ local_irq_save(flags);
+ local_irq_disable();
+
+ /* Save reset vector */
+ save_vector = *vector;
+
+ /* Setup fake reset vector to call __secondary_start_mpc86xx. */
+ target = (unsigned long) __secondary_start_mpc86xx;
+ create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
+
+ /* Kick that CPU */
+ smp_86xx_release_core(nr);
+
+ /* Wait a bit for the CPU to take the exception. */
+ while ((__secondary_hold_acknowledge != nr) && (n++, n < 1000))
+ mdelay(1);
+
+ /* Restore the exception vector */
+ *vector = save_vector;
+ flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+ local_irq_restore(flags);
+
+ pr_debug("wait CPU #%d for %d msecs.\n", nr, n);
+}
+
+
+static void __init
+smp_86xx_setup_cpu(int cpu_nr)
+{
+ mpic_setup_this_cpu();
+}
+
+
+struct smp_ops_t smp_86xx_ops = {
+ .message_pass = smp_mpic_message_pass,
+ .probe = smp_mpic_probe,
+ .kick_cpu = smp_86xx_kick_cpu,
+ .setup_cpu = smp_86xx_setup_cpu,
+ .take_timebase = smp_generic_take_timebase,
+ .give_timebase = smp_generic_give_timebase,
+};
+
+
+void __init
+mpc86xx_smp_init(void)
+{
+ smp_ops = &smp_86xx_ops;
+}
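
[Editor's note] For reference, a standalone sketch (plain C, fake memory instead of the MCM register) of the two pieces of the kick sequence above: the port-configuration bit that releases core #nr, and the bounded poll on the acknowledge word. The values are illustrative only.

    #include <stdio.h>

    static volatile unsigned long secondary_hold_acknowledge;  /* stand-in */

    int main(void)
    {
            int nr = 1;                     /* hypothetical secondary core */
            unsigned int pcr = 0;
            int n = 0;

            /* release core #nr: set bit (nr + 24) in the MCM port config value */
            pcr |= 1 << (nr + 24);
            printf("pcr = 0x%08x\n", pcr);  /* 0x02000000 for core 1 */

            /* bounded poll, standing in for the mdelay(1) loop in the kernel */
            secondary_hold_acknowledge = nr;        /* pretend the core answered */
            while (secondary_hold_acknowledge != (unsigned long)nr && n < 1000)
                    n++;
            printf("waited %d msecs\n", n);
            return 0;
    }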
diff --git a/arch/powerpc/platforms/86xx/pci.c b/arch/powerpc/platforms/86xx/pci.c
new file mode 100644
index 000000000000..5180df7c75bc
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/pci.c
@@ -0,0 +1,325 @@
+/*
+ * MPC86XX pci setup code
+ *
+ * Recode: ZHANG WEI <wei.zhang@freescale.com>
+ * Initial author: Xianghua Xiao <x.xiao@freescale.com>
+ *
+ * Copyright 2006 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/serial.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/immap_86xx.h>
+#include <asm/pci-bridge.h>
+#include <sysdev/fsl_soc.h>
+
+#include "mpc86xx.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
+#else
+#define DBG(fmt, args...)
+#endif
+
+struct pcie_outbound_window_regs {
+ uint pexotar; /* 0x.0 - PCI Express outbound translation address register */
+ uint pexotear; /* 0x.4 - PCI Express outbound translation extended address register */
+ uint pexowbar; /* 0x.8 - PCI Express outbound window base address register */
+ char res1[4];
+ uint pexowar; /* 0x.10 - PCI Express outbound window attributes register */
+ char res2[12];
+};
+
+struct pcie_inbound_window_regs {
+ uint pexitar; /* 0x.0 - PCI Express inbound translation address register */
+ char res1[4];
+ uint pexiwbar; /* 0x.8 - PCI Express inbound window base address register */
+ uint pexiwbear; /* 0x.c - PCI Express inbound window base extended address register */
+ uint pexiwar; /* 0x.10 - PCI Express inbound window attributes register */
+ char res2[12];
+};
+
+static void __init setup_pcie_atmu(struct pci_controller *hose, struct resource *rsrc)
+{
+ volatile struct ccsr_pex *pcie;
+ volatile struct pcie_outbound_window_regs *pcieow;
+ volatile struct pcie_inbound_window_regs *pcieiw;
+ int i = 0;
+
+ DBG("PCIE memory map start 0x%x, size 0x%x\n", rsrc->start,
+ rsrc->end - rsrc->start + 1);
+ pcie = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
+
+ /* Disable all windows (except pexowar0 since it's ignored) */
+ pcie->pexowar1 = 0;
+ pcie->pexowar2 = 0;
+ pcie->pexowar3 = 0;
+ pcie->pexowar4 = 0;
+ pcie->pexiwar1 = 0;
+ pcie->pexiwar2 = 0;
+ pcie->pexiwar3 = 0;
+
+ pcieow = (struct pcie_outbound_window_regs *)&pcie->pexotar1;
+ pcieiw = (struct pcie_inbound_window_regs *)&pcie->pexitar1;
+
+ /* Setup outbound MEM window */
+ for(i = 0; i < 3; i++)
+ if (hose->mem_resources[i].flags & IORESOURCE_MEM){
+ DBG("PCIE MEM resource start 0x%08x, size 0x%08x.\n",
+ hose->mem_resources[i].start,
+ hose->mem_resources[i].end
+ - hose->mem_resources[i].start + 1);
+ pcieow->pexotar = (hose->mem_resources[i].start) >> 12
+ & 0x000fffff;
+ pcieow->pexotear = 0;
+ pcieow->pexowbar = (hose->mem_resources[i].start) >> 12
+ & 0x000fffff;
+ /* Enable, Mem R/W */
+ pcieow->pexowar = 0x80044000 |
+ (__ilog2(hose->mem_resources[i].end
+ - hose->mem_resources[i].start + 1)
+ - 1);
+ pcieow++;
+ }
+
+ /* Setup outbound IO window */
+ if (hose->io_resource.flags & IORESOURCE_IO){
+ DBG("PCIE IO resource start 0x%08x, size 0x%08x, phy base 0x%08x.\n",
+ hose->io_resource.start,
+ hose->io_resource.end - hose->io_resource.start + 1,
+ hose->io_base_phys);
+ pcieow->pexotar = (hose->io_resource.start) >> 12 & 0x000fffff;
+ pcieow->pexotear = 0;
+ pcieow->pexowbar = (hose->io_base_phys) >> 12 & 0x000fffff;
+ /* Enable, IO R/W */
+ pcieow->pexowar = 0x80088000 | (__ilog2(hose->io_resource.end
+ - hose->io_resource.start + 1) - 1);
+ }
+
+ /* Setup 2G inbound Memory Window @ 0 */
+ pcieiw->pexitar = 0x00000000;
+ pcieiw->pexiwbar = 0x00000000;
+ /* Enable, Prefetch, Local Mem, Snoop R/W, 2G */
+ pcieiw->pexiwar = 0xa0f5501e;
+}
+
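
[Editor's note] The window attribute values written above pack the window size into their low bits: the enable/type flags (0x80044000 for memory, 0x80088000 for I/O) are OR'd with log2(size) - 1. A standalone sketch with a made-up 256 MB window:

    #include <stdio.h>

    static unsigned int ilog2_ul(unsigned long v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned long size = 256UL << 20;       /* hypothetical 256 MB MEM window */
            unsigned int pexowar = 0x80044000u | (ilog2_ul(size) - 1);

            printf("pexowar = 0x%08x\n", pexowar);  /* 0x8004401b */
            return 0;
    }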
+static void __init
+mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 pcie_size)
+{
+ volatile struct ccsr_pex *pcie;
+ u16 cmd;
+ unsigned int temps;
+
+ DBG("PCIE host controller register offset 0x%08x, size 0x%08x.\n",
+ pcie_offset, pcie_size);
+
+ pcie = ioremap(pcie_offset, pcie_size);
+
+ early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_IO;
+ early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
+
+ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
+
+ /* PCIe bus: fix the MPC8641D host bridge's location to bus 0xFF. */
+ early_read_config_dword(hose, 0, 0, PCI_PRIMARY_BUS, &temps);
+ temps = (temps & 0xff000000) | (0xff) | (0x0 << 8) | (0xfe << 16);
+ early_write_config_dword(hose, 0, 0, PCI_PRIMARY_BUS, temps);
+}
+
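
[Editor's note] To make the bus-number write above easier to read, here is how that PCI_PRIMARY_BUS dword decomposes into its byte fields (the top byte is preserved from the read-back value; zero is assumed here purely for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int temps = 0x00000000;        /* assume the read-back value was 0 */

            temps = (temps & 0xff000000) | (0xff) | (0x0 << 8) | (0xfe << 16);
            printf("primary bus     = 0x%02x\n", temps & 0xff);             /* 0xff */
            printf("secondary bus   = 0x%02x\n", (temps >> 8) & 0xff);      /* 0x00 */
            printf("subordinate bus = 0x%02x\n", (temps >> 16) & 0xff);     /* 0xfe */
            return 0;
    }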
+int __init add_bridge(struct device_node *dev)
+{
+ int len;
+ struct pci_controller *hose;
+ struct resource rsrc;
+ int *bus_range;
+ int has_address = 0;
+ int primary = 0;
+
+ DBG("Adding PCIE host bridge %s\n", dev->full_name);
+
+ /* Fetch host bridge registers address */
+ has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
+
+ /* Get bus range if any */
+ bus_range = (int *) get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int))
+ printk(KERN_WARNING "Can't get bus-range for %s, assume"
+ " bus 0\n", dev->full_name);
+
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return -ENOMEM;
+ hose->arch_data = dev;
+ hose->set_cfg_type = 1;
+
+ /* last_busno = 0xfe, caused by an MPC8641 PCIe bug */
+ hose->first_busno = bus_range ? bus_range[0] : 0x0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xfe;
+
+ setup_indirect_pcie(hose, rsrc.start, rsrc.start + 0x4);
+
+ /* Setup the PCIE host controller. */
+ mpc86xx_setup_pcie(hose, rsrc.start, rsrc.end - rsrc.start + 1);
+
+ if ((rsrc.start & 0xfffff) == 0x8000)
+ primary = 1;
+
+ printk(KERN_INFO "Found MPC86xx PCIE host bridge at 0x%08lx. "
+ "Firmware bus number: %d->%d\n",
+ rsrc.start, hose->first_busno, hose->last_busno);
+
+ DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
+ hose, hose->cfg_addr, hose->cfg_data);
+
+ /* Interpret the "ranges" property */
+ /* This also maps the I/O region and sets isa_io/mem_base */
+ pci_process_bridge_OF_ranges(hose, dev, primary);
+
+ /* Setup PEX window registers */
+ setup_pcie_atmu(hose, &rsrc);
+
+ return 0;
+}
+
+static void __devinit quirk_ali1575(struct pci_dev *dev)
+{
+ unsigned short temp;
+
+ /*
+ * ALI1575 interrupt routing table setup:
+ *
+ * IRQ pin IRQ#
+ * PIRQA ---- 3
+ * PIRQB ---- 4
+ * PIRQC ---- 5
+ * PIRQD ---- 6
+ * PIRQE ---- 9
+ * PIRQF ---- 10
+ * PIRQG ---- 11
+ * PIRQH ---- 12
+ *
+ * interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD
+ * PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA
+ */
+ pci_write_config_dword(dev, 0x48, 0xb9317542);
+
+ /* USB 1.1 OHCI controller 1, interrupt: PIRQE */
+ pci_write_config_byte(dev, 0x86, 0x0c);
+
+ /* USB 1.1 OHCI controller 2, interrupt: PIRQF */
+ pci_write_config_byte(dev, 0x87, 0x0d);
+
+ /* USB 1.1 OHCI controller 3, interrupt: PIRQH */
+ pci_write_config_byte(dev, 0x88, 0x0f);
+
+ /* USB 2.0 controller, interrupt: PIRQ7 */
+ pci_write_config_byte(dev, 0x74, 0x06);
+
+ /* Audio controller, interrupt: PIRQE */
+ pci_write_config_byte(dev, 0x8a, 0x0c);
+
+ /* Modem controller, interrupt: PIRQF */
+ pci_write_config_byte(dev, 0x8b, 0x0d);
+
+ /* HD audio controller, interrupt: PIRQG */
+ pci_write_config_byte(dev, 0x8c, 0x0e);
+
+ /* Serial ATA interrupt: PIRQD */
+ pci_write_config_byte(dev, 0x8d, 0x0b);
+
+ /* SMB interrupt: PIRQH */
+ pci_write_config_byte(dev, 0x8e, 0x0f);
+
+ /* PMU ACPI SCI interrupt: PIRQH */
+ pci_write_config_byte(dev, 0x8f, 0x0f);
+
+ /* Primary PATA IDE IRQ: 14
+ * Secondary PATA IDE IRQ: 15
+ */
+ pci_write_config_byte(dev, 0x44, 0x3d);
+ pci_write_config_byte(dev, 0x75, 0x0f);
+
+ /* Set IRQ14 and IRQ15 to legacy IRQs */
+ pci_read_config_word(dev, 0x46, &temp);
+ temp |= 0xc000;
+ pci_write_config_word(dev, 0x46, temp);
+
+ /* Set i8259 interrupt trigger
+ * IRQ 3: Level
+ * IRQ 4: Level
+ * IRQ 5: Level
+ * IRQ 6: Level
+ * IRQ 7: Level
+ * IRQ 9: Level
+ * IRQ 10: Level
+ * IRQ 11: Level
+ * IRQ 12: Level
+ * IRQ 14: Edge
+ * IRQ 15: Edge
+ */
+ outb(0xfa, 0x4d0);
+ outb(0x1e, 0x4d1);
+}
+
+static void __devinit quirk_uli5288(struct pci_dev *dev)
+{
+ unsigned char c;
+
+ pci_read_config_byte(dev,0x83,&c);
+ c |= 0x80;
+ pci_write_config_byte(dev, 0x83, c);
+
+ pci_write_config_byte(dev, 0x09, 0x01);
+ pci_write_config_byte(dev, 0x0a, 0x06);
+
+ pci_read_config_byte(dev,0x83,&c);
+ c &= 0x7f;
+ pci_write_config_byte(dev, 0x83, c);
+
+ pci_read_config_byte(dev,0x84,&c);
+ c |= 0x01;
+ pci_write_config_byte(dev, 0x84, c);
+}
+
+static void __devinit quirk_uli5229(struct pci_dev *dev)
+{
+ unsigned short temp;
+ pci_write_config_word(dev, 0x04, 0x0405);
+ pci_read_config_word(dev, 0x4a, &temp);
+ temp |= 0x1000;
+ pci_write_config_word(dev, 0x4a, temp);
+}
+
+static void __devinit early_uli5249(struct pci_dev *dev)
+{
+ unsigned char temp;
+ pci_write_config_word(dev, 0x04, 0x0007);
+ pci_read_config_byte(dev, 0x7c, &temp);
+ pci_write_config_byte(dev, 0x7c, 0x80);
+ pci_write_config_byte(dev, 0x09, 0x01);
+ pci_write_config_byte(dev, 0x7c, temp);
+ dev->class |= 0x1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_ali1575);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249);
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index c4f6b0d2d140..292863694562 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PPC_CHRP) += chrp/
obj-$(CONFIG_4xx) += 4xx/
obj-$(CONFIG_PPC_83xx) += 83xx/
obj-$(CONFIG_PPC_85xx) += 85xx/
+obj-$(CONFIG_PPC_86xx) += 86xx/
obj-$(CONFIG_PPC_PSERIES) += pseries/
obj-$(CONFIG_PPC_ISERIES) += iseries/
obj-$(CONFIG_PPC_MAPLE) += maple/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 6a02d51086c8..352bbbacde9a 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -5,15 +5,24 @@ config SPU_FS
tristate "SPU file system"
default m
depends on PPC_CELL
+ select SPU_BASE
help
The SPU file system is used to access Synergistic Processing
Units on machines implementing the Broadband Processor
Architecture.
+config SPU_BASE
+ bool
+ default n
+
config SPUFS_MMAP
bool
depends on SPU_FS && SPARSEMEM
select MEMORY_HOTPLUG
default y
+config CBE_RAS
+ bool "RAS features for bare metal Cell BE"
+ default y
+
endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index e570bad06394..c89cdd67383b 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,16 +1,15 @@
-obj-y += interrupt.o iommu.o setup.o spider-pic.o
-obj-y += pervasive.o
+obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \
+ cbe_regs.o spider-pic.o pervasive.o
+obj-$(CONFIG_CBE_RAS) += ras.o
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SPU_FS) += spu-base.o spufs/
-
-spu-base-y += spu_base.o spu_priv1.o
+ifeq ($(CONFIG_SMP),y)
+obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
+endif
# needed only when building loadable spufs.ko
-spufs-modular-$(CONFIG_SPU_FS) += spu_syscalls.o
-obj-y += $(spufs-modular-m)
-
-# always needed in kernel
-spufs-builtin-$(CONFIG_SPU_FS) += spu_callbacks.o
-obj-y += $(spufs-builtin-y) $(spufs-builtin-m)
+spufs-modular-$(CONFIG_SPU_FS) += spu_syscalls.o
+spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o
+obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
+ $(spufs-modular-m) \
+ $(spu-priv1-y) spufs/
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
new file mode 100644
index 000000000000..2dfde61c8412
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -0,0 +1,128 @@
+/*
+ * cbe_regs.c
+ *
+ * Accessor routines for the various MMIO register blocks of the CBE
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+
+#include <linux/config.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+
+#include "cbe_regs.h"
+
+#define MAX_CBE 2
+
+/*
+ * Current implementation uses "cpu" nodes. We build our own mapping
+ * array of cpu numbers to cpu nodes locally for now to allow interrupt
+ * time code to have a fast path rather than call of_get_cpu_node(). If
+ * we implement cpu hotplug, we'll have to install an appropriate norifier
+ * in order to release references to the cpu going away
+ */
+static struct cbe_regs_map
+{
+ struct device_node *cpu_node;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct cbe_iic_regs __iomem *iic_regs;
+} cbe_regs_maps[MAX_CBE];
+static int cbe_regs_map_count;
+
+static struct cbe_thread_map
+{
+ struct device_node *cpu_node;
+ struct cbe_regs_map *regs;
+} cbe_thread_map[NR_CPUS];
+
+static struct cbe_regs_map *cbe_find_map(struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < cbe_regs_map_count; i++)
+ if (cbe_regs_maps[i].cpu_node == np)
+ return &cbe_regs_maps[i];
+ return NULL;
+}
+
+struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return map->pmd_regs;
+}
+
+struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return map->pmd_regs;
+}
+
+
+struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return map->iic_regs;
+}
+struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return map->iic_regs;
+}
+
+void __init cbe_regs_init(void)
+{
+ int i;
+ struct device_node *cpu;
+
+ /* Build local fast map of CPUs */
+ for_each_cpu(i)
+ cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
+
+ /* Find maps for each device tree CPU */
+ for_each_node_by_type(cpu, "cpu") {
+ struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];
+
+ /* That hack must die die die ! */
+ struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *prop;
+
+
+ if (cbe_regs_map_count > MAX_CBE) {
+ printk(KERN_ERR "cbe_regs: More BE chips than supported"
+ "!\n");
+ cbe_regs_map_count--;
+ return;
+ }
+ map->cpu_node = cpu;
+ for_each_cpu(i)
+ if (cbe_thread_map[i].cpu_node == cpu)
+ cbe_thread_map[i].regs = map;
+
+ prop = (struct address_prop *)get_property(cpu, "pervasive",
+ NULL);
+ if (prop != NULL)
+ map->pmd_regs = ioremap(prop->address, prop->len);
+
+ prop = (struct address_prop *)get_property(cpu, "iic",
+ NULL);
+ if (prop != NULL)
+ map->iic_regs = ioremap(prop->address, prop->len);
+ }
+}
+
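
[Editor's note] The fast path described in the comment at the top of this file boils down to an array indexed by logical CPU number that points back into the per-chip map, so interrupt-time code never walks the device tree. A minimal standalone sketch of that lookup (the chip/thread layout below is assumed, not read from firmware):

    #include <stdio.h>

    #define MAX_CBE 2
    #define NR_CPUS 4       /* assumed: two chips, two threads each */

    struct regs_map { int chip; };

    static struct regs_map maps[MAX_CBE] = { { 0 }, { 1 } };
    static struct regs_map *thread_map[NR_CPUS] = {
            &maps[0], &maps[0], &maps[1], &maps[1],
    };

    int main(void)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu %d -> chip %d\n", cpu, thread_map[cpu]->chip);
            return 0;
    }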
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
new file mode 100644
index 000000000000..e76e4a6af5bc
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_regs.h
@@ -0,0 +1,129 @@
+/*
+ * cbe_regs.h
+ *
+ * This file is intended to hold the various register definitions for CBE
+ * on-chip system devices (memory controller, IO controller, etc...)
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+#ifndef CBE_REGS_H
+#define CBE_REGS_H
+
+/*
+ *
+ * Some HID register definitions
+ *
+ */
+
+/* CBE specific HID0 bits */
+#define HID0_CBE_THERM_WAKEUP 0x0000020000000000ul
+#define HID0_CBE_SYSERR_WAKEUP 0x0000008000000000ul
+#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul
+#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
+
+
+/*
+ *
+ * Pervasive unit register definitions
+ *
+ */
+
+struct cbe_pmd_regs {
+ u8 pad_0x0000_0x0800[0x0800 - 0x0000]; /* 0x0000 */
+
+ /* Thermal Sensor Registers */
+ u64 ts_ctsr1; /* 0x0800 */
+ u64 ts_ctsr2; /* 0x0808 */
+ u64 ts_mtsr1; /* 0x0810 */
+ u64 ts_mtsr2; /* 0x0818 */
+ u64 ts_itr1; /* 0x0820 */
+ u64 ts_itr2; /* 0x0828 */
+ u64 ts_gitr; /* 0x0830 */
+ u64 ts_isr; /* 0x0838 */
+ u64 ts_imr; /* 0x0840 */
+ u64 tm_cr1; /* 0x0848 */
+ u64 tm_cr2; /* 0x0850 */
+ u64 tm_simr; /* 0x0858 */
+ u64 tm_tpr; /* 0x0860 */
+ u64 tm_str1; /* 0x0868 */
+ u64 tm_str2; /* 0x0870 */
+ u64 tm_tsr; /* 0x0878 */
+
+ /* Power Management */
+ u64 pm_control; /* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
+ u64 pm_status; /* 0x0888 */
+
+ /* Time Base Register */
+ u64 tbr; /* 0x0890 */
+
+ u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
+
+ /* Fault Isolation Registers */
+ u64 checkstop_fir; /* 0x0c00 */
+ u64 recoverable_fir;
+ u64 spec_att_mchk_fir;
+ u64 fir_mode_reg;
+ u64 fir_enable_mask;
+
+ u8 pad_0x0c28_0x1000 [0x1000 - 0x0c28]; /* 0x0c28 */
+};
+
+extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
+extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
+
+/*
+ *
+ * IIC unit register definitions
+ *
+ */
+
+struct cbe_iic_pending_bits {
+ u32 data;
+ u8 flags;
+ u8 class;
+ u8 source;
+ u8 prio;
+};
+
+#define CBE_IIC_IRQ_VALID 0x80
+#define CBE_IIC_IRQ_IPI 0x40
+
+struct cbe_iic_thread_regs {
+ struct cbe_iic_pending_bits pending;
+ struct cbe_iic_pending_bits pending_destr;
+ u64 generate;
+ u64 prio;
+};
+
+struct cbe_iic_regs {
+ u8 pad_0x0000_0x0400[0x0400 - 0x0000]; /* 0x0000 */
+
+ /* IIC interrupt registers */
+ struct cbe_iic_thread_regs thread[2]; /* 0x0400 */
+ u64 iic_ir; /* 0x0440 */
+ u64 iic_is; /* 0x0448 */
+
+ u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */
+
+ /* IOC FIR */
+ u64 ioc_fir_reset; /* 0x0500 */
+ u64 ioc_fir_set;
+ u64 ioc_checkstop_enable;
+ u64 ioc_fir_error_mask;
+ u64 ioc_syserr_enable;
+ u64 ioc_fir;
+
+ u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */
+};
+
+extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
+extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
+
+
+/* Init this module early */
+extern void cbe_regs_init(void);
+
+
+#endif /* CBE_REGS_H */
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 978be1c30c1b..f4e2d8805c9e 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -33,29 +33,10 @@
#include <asm/ptrace.h>
#include "interrupt.h"
-
-struct iic_pending_bits {
- u32 data;
- u8 flags;
- u8 class;
- u8 source;
- u8 prio;
-};
-
-enum iic_pending_flags {
- IIC_VALID = 0x80,
- IIC_IPI = 0x40,
-};
-
-struct iic_regs {
- struct iic_pending_bits pending;
- struct iic_pending_bits pending_destr;
- u64 generate;
- u64 prio;
-};
+#include "cbe_regs.h"
struct iic {
- struct iic_regs __iomem *regs;
+ struct cbe_iic_thread_regs __iomem *regs;
u8 target_id;
};
@@ -115,7 +96,7 @@ static struct hw_interrupt_type iic_pic = {
.end = iic_end,
};
-static int iic_external_get_irq(struct iic_pending_bits pending)
+static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
{
int irq;
unsigned char node, unit;
@@ -136,8 +117,7 @@ static int iic_external_get_irq(struct iic_pending_bits pending)
* One of these units can be connected
* to an external interrupt controller.
*/
- if (pending.prio > 0x3f ||
- pending.class != 2)
+ if (pending.class != 2)
break;
irq = IIC_EXT_OFFSET
+ spider_get_irq(node)
@@ -168,15 +148,15 @@ int iic_get_irq(struct pt_regs *regs)
{
struct iic *iic;
int irq;
- struct iic_pending_bits pending;
+ struct cbe_iic_pending_bits pending;
iic = &__get_cpu_var(iic);
*(unsigned long *) &pending =
in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
irq = -1;
- if (pending.flags & IIC_VALID) {
- if (pending.flags & IIC_IPI) {
+ if (pending.flags & CBE_IIC_IRQ_VALID) {
+ if (pending.flags & CBE_IIC_IRQ_IPI) {
irq = IIC_IPI_OFFSET + (pending.prio >> 4);
/*
if (irq > 0x80)
@@ -226,7 +206,7 @@ static int setup_iic_hardcoded(void)
regs += 0x20;
printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
- iic->regs = ioremap(regs, sizeof(struct iic_regs));
+ iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs));
iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
}
@@ -267,12 +247,12 @@ static int setup_iic(void)
}
iic = &per_cpu(iic, np[0]);
- iic->regs = ioremap(regs[0], sizeof(struct iic_regs));
+ iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs));
iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);
iic = &per_cpu(iic, np[1]);
- iic->regs = ioremap(regs[2], sizeof(struct iic_regs));
+ iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs));
iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index a49ceb799a8e..a35004e14c69 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -473,6 +473,16 @@ static int cell_dma_supported(struct device *dev, u64 mask)
return mask < 0x100000000ull;
}
+static struct dma_mapping_ops cell_iommu_ops = {
+ .alloc_coherent = cell_alloc_coherent,
+ .free_coherent = cell_free_coherent,
+ .map_single = cell_map_single,
+ .unmap_single = cell_unmap_single,
+ .map_sg = cell_map_sg,
+ .unmap_sg = cell_unmap_sg,
+ .dma_supported = cell_dma_supported,
+};
+
void cell_init_iommu(void)
{
int setup_bus = 0;
@@ -498,11 +508,5 @@ void cell_init_iommu(void)
}
}
- pci_dma_ops.alloc_coherent = cell_alloc_coherent;
- pci_dma_ops.free_coherent = cell_free_coherent;
- pci_dma_ops.map_single = cell_map_single;
- pci_dma_ops.unmap_single = cell_unmap_single;
- pci_dma_ops.map_sg = cell_map_sg;
- pci_dma_ops.unmap_sg = cell_unmap_sg;
- pci_dma_ops.dma_supported = cell_dma_supported;
+ pci_dma_ops = cell_iommu_ops;
}
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 7eed8c624517..695ac4e1617e 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -37,36 +37,28 @@
#include <asm/reg.h>
#include "pervasive.h"
+#include "cbe_regs.h"
static DEFINE_SPINLOCK(cbe_pervasive_lock);
-struct cbe_pervasive {
- struct pmd_regs __iomem *regs;
- unsigned int thread;
-};
-
-/* can't use per_cpu from setup_arch */
-static struct cbe_pervasive cbe_pervasive[NR_CPUS];
static void __init cbe_enable_pause_zero(void)
{
unsigned long thread_switch_control;
unsigned long temp_register;
- struct cbe_pervasive *p;
- int thread;
+ struct cbe_pmd_regs __iomem *pregs;
spin_lock_irq(&cbe_pervasive_lock);
- p = &cbe_pervasive[smp_processor_id()];
-
- if (!cbe_pervasive->regs)
+ pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
+ if (pregs == NULL)
goto out;
pr_debug("Power Management: CPU %d\n", smp_processor_id());
/* Enable Pause(0) control bit */
- temp_register = in_be64(&p->regs->pm_control);
+ temp_register = in_be64(&pregs->pm_control);
- out_be64(&p->regs->pm_control,
- temp_register|PMD_PAUSE_ZERO_CONTROL);
+ out_be64(&pregs->pm_control,
+ temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
/* Enable DEC and EE interrupt request */
thread_switch_control = mfspr(SPRN_TSC_CELL);
@@ -75,25 +67,16 @@ static void __init cbe_enable_pause_zero(void)
switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
case CTRL_CT0:
thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
- thread = 0;
break;
case CTRL_CT1:
thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
- thread = 1;
break;
default:
printk(KERN_WARNING "%s: unknown configuration\n",
__FUNCTION__);
- thread = -1;
break;
}
- if (p->thread != thread)
- printk(KERN_WARNING "%s: device tree inconsistant, "
- "cpu %i: %d/%d\n", __FUNCTION__,
- smp_processor_id(),
- p->thread, thread);
-
mtspr(SPRN_TSC_CELL, thread_switch_control);
out:
@@ -104,6 +87,11 @@ static void cbe_idle(void)
{
unsigned long ctrl;
+ /* Why do we do this on every idle? Couldn't it be done once for
+ * all, or do we lose the state somehow? Also, can't the pm_control
+ * register be set once at boot? We really want to move this out of
+ * the idle loop in order to implement a simple powersave.
+ */
cbe_enable_pause_zero();
while (1) {
@@ -152,8 +140,15 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
timer_interrupt(regs);
break;
case SRR1_WAKEMT:
- /* no action required */
break;
+#ifdef CONFIG_CBE_RAS
+ case SRR1_WAKESYSERR:
+ cbe_system_error_exception(regs);
+ break;
+ case SRR1_WAKETHERM:
+ cbe_thermal_exception(regs);
+ break;
+#endif /* CONFIG_CBE_RAS */
default:
/* do system reset */
return 0;
@@ -162,68 +157,11 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
return 1;
}
-static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
-{
- struct device_node *node;
- unsigned int *int_servers;
- char *addr;
- unsigned long real_address;
- unsigned int size;
-
- struct pmd_regs __iomem *pmd_mmio_area;
- int hardid, thread;
- int proplen;
-
- pmd_mmio_area = NULL;
- hardid = get_hard_smp_processor_id(cpu);
- for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
- int_servers = (void *) get_property(node,
- "ibm,ppc-interrupt-server#s", &proplen);
- if (!int_servers) {
- printk(KERN_WARNING "%s misses "
- "ibm,ppc-interrupt-server#s property",
- node->full_name);
- continue;
- }
- for (thread = 0; thread < proplen / sizeof (int); thread++) {
- if (hardid == int_servers[thread]) {
- addr = get_property(node, "pervasive", NULL);
- goto found;
- }
- }
- }
-
- printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
- return -EINVAL;
-
-found:
- real_address = *(unsigned long*) addr;
- addr += sizeof (unsigned long);
- size = *(unsigned int*) addr;
-
- pr_debug("pervasive area for CPU %d at %lx, size %x\n",
- cpu, real_address, size);
- p->regs = ioremap(real_address, size);
- p->thread = thread;
- return 0;
-}
-
-void __init cell_pervasive_init(void)
+void __init cbe_pervasive_init(void)
{
- struct cbe_pervasive *p;
- int cpu;
- int ret;
-
if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
return;
- for_each_possible_cpu(cpu) {
- p = &cbe_pervasive[cpu];
- ret = cbe_find_pmd_mmio(cpu, p);
- if (ret)
- return;
- }
-
ppc_md.idle_loop = cbe_idle;
ppc_md.system_reset_exception = cbe_system_reset_exception;
}
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
index da1fb85ca3e8..7b50947f8044 100644
--- a/arch/powerpc/platforms/cell/pervasive.h
+++ b/arch/powerpc/platforms/cell/pervasive.h
@@ -25,38 +25,9 @@
#ifndef PERVASIVE_H
#define PERVASIVE_H
-struct pmd_regs {
- u8 pad_0x0000_0x0800[0x0800 - 0x0000]; /* 0x0000 */
-
- /* Thermal Sensor Registers */
- u64 ts_ctsr1; /* 0x0800 */
- u64 ts_ctsr2; /* 0x0808 */
- u64 ts_mtsr1; /* 0x0810 */
- u64 ts_mtsr2; /* 0x0818 */
- u64 ts_itr1; /* 0x0820 */
- u64 ts_itr2; /* 0x0828 */
- u64 ts_gitr; /* 0x0830 */
- u64 ts_isr; /* 0x0838 */
- u64 ts_imr; /* 0x0840 */
- u64 tm_cr1; /* 0x0848 */
- u64 tm_cr2; /* 0x0850 */
- u64 tm_simr; /* 0x0858 */
- u64 tm_tpr; /* 0x0860 */
- u64 tm_str1; /* 0x0868 */
- u64 tm_str2; /* 0x0870 */
- u64 tm_tsr; /* 0x0878 */
-
- /* Power Management */
- u64 pm_control; /* 0x0880 */
-#define PMD_PAUSE_ZERO_CONTROL 0x10000
- u64 pm_status; /* 0x0888 */
-
- /* Time Base Register */
- u64 tbr; /* 0x0890 */
-
- u8 pad_0x0898_0x1000 [0x1000 - 0x0898]; /* 0x0898 */
-};
-
-void __init cell_pervasive_init(void);
+extern void cbe_pervasive_init(void);
+extern void cbe_system_error_exception(struct pt_regs *regs);
+extern void cbe_maintenance_exception(struct pt_regs *regs);
+extern void cbe_thermal_exception(struct pt_regs *regs);
#endif
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
new file mode 100644
index 000000000000..033ad6e2827b
--- /dev/null
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -0,0 +1,112 @@
+#define DEBUG
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+
+#include <asm/reg.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+
+#include "ras.h"
+#include "cbe_regs.h"
+
+
+static void dump_fir(int cpu)
+{
+ struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu);
+ struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu);
+
+ if (pregs == NULL)
+ return;
+
+ /* Todo: do some nicer parsing of the bits and, based on them, go
+ * down to the other sub-units' FIRs, not only the IIC's
+ */
+ printk(KERN_ERR "Global Checkstop FIR : 0x%016lx\n",
+ in_be64(&pregs->checkstop_fir));
+ printk(KERN_ERR "Global Recoverable FIR : 0x%016lx\n",
+ in_be64(&pregs->checkstop_fir));
+ printk(KERN_ERR "Global MachineCheck FIR : 0x%016lx\n",
+ in_be64(&pregs->spec_att_mchk_fir));
+
+ if (iregs == NULL)
+ return;
+ printk(KERN_ERR "IOC FIR : 0x%016lx\n",
+ in_be64(&iregs->ioc_fir));
+
+}
+
+void cbe_system_error_exception(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu);
+ dump_fir(cpu);
+ dump_stack();
+}
+
+void cbe_maintenance_exception(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * Nothing implemented for the maintenance interrupt at this point
+ */
+
+ printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu);
+ dump_stack();
+}
+
+void cbe_thermal_exception(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * Nothing implemented for the thermal interrupt at this point
+ */
+
+ printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu);
+ dump_stack();
+}
+
+static int cbe_machine_check_handler(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu);
+ dump_fir(cpu);
+
+ /* No recovery from this code now, let's continue */
+ return 0;
+}
+
+void __init cbe_ras_init(void)
+{
+ unsigned long hid0;
+
+ /*
+ * Enable System Error & thermal interrupts and wakeup conditions
+ */
+
+ hid0 = mfspr(SPRN_HID0);
+ hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP |
+ HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP;
+ mtspr(SPRN_HID0, hid0);
+ mb();
+
+ /*
+ * Install machine check handler. Leave setting of precise mode to
+ * what the firmware did for now
+ */
+ ppc_md.machine_check_exception = cbe_machine_check_handler;
+ mb();
+
+ /*
+ * For now, we assume that IOC_FIR is already set to forward some
+ * error conditions to the System Error handler. If that is not true
+ * then it will have to be fixed up here.
+ */
+}
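
[Editor's note] The HID0 manipulation in cbe_ras_init() is a plain read-modify-write with the four bits defined in cbe_regs.h; a standalone sketch follows (the starting HID0 value is a stand-in, since mfspr() is not available outside the kernel):

    #include <stdio.h>

    #define HID0_CBE_THERM_WAKEUP   0x0000020000000000ul
    #define HID0_CBE_SYSERR_WAKEUP  0x0000008000000000ul
    #define HID0_CBE_THERM_INT_EN   0x0000000400000000ul
    #define HID0_CBE_SYSERR_INT_EN  0x0000000200000000ul

    int main(void)
    {
            unsigned long hid0 = 0; /* stand-in for mfspr(SPRN_HID0) */

            hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP |
                    HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP;
            printf("HID0 |= 0x%016lx\n", hid0);     /* 0x0000028600000000 */
            return 0;
    }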
diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h
new file mode 100644
index 000000000000..eb7ee54c82a0
--- /dev/null
+++ b/arch/powerpc/platforms/cell/ras.h
@@ -0,0 +1,9 @@
+#ifndef RAS_H
+#define RAS_H
+
+extern void cbe_system_error_exception(struct pt_regs *regs);
+extern void cbe_maintenance_exception(struct pt_regs *regs);
+extern void cbe_thermal_exception(struct pt_regs *regs);
+extern void cbe_ras_init(void);
+
+#endif /* RAS_H */
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index fd3e5609e3e0..3d1831d331e5 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -49,10 +49,13 @@
#include <asm/ppc-pci.h>
#include <asm/irq.h>
#include <asm/spu.h>
+#include <asm/spu_priv1.h>
#include "interrupt.h"
#include "iommu.h"
+#include "cbe_regs.h"
#include "pervasive.h"
+#include "ras.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -81,6 +84,15 @@ static void __init cell_setup_arch(void)
{
ppc_md.init_IRQ = iic_init_IRQ;
ppc_md.get_irq = iic_get_irq;
+#ifdef CONFIG_SPU_BASE
+ spu_priv1_ops = &spu_priv1_mmio_ops;
+#endif
+
+ cbe_regs_init();
+
+#ifdef CONFIG_CBE_RAS
+ cbe_ras_init();
+#endif
#ifdef CONFIG_SMP
smp_init_cell();
@@ -98,7 +110,7 @@ static void __init cell_setup_arch(void)
init_pci_config_tokens();
find_and_init_phbs();
spider_init_IRQ();
- cell_pervasive_init();
+ cbe_pervasive_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index ad141fe8d52d..db82f503ba2c 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -34,10 +34,15 @@
#include <asm/prom.h>
#include <linux/mutex.h>
#include <asm/spu.h>
+#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>
#include "interrupt.h"
+const struct spu_priv1_ops *spu_priv1_ops;
+
+EXPORT_SYMBOL_GPL(spu_priv1_ops);
+
static int __spu_trap_invalid_dma(struct spu *spu)
{
pr_debug("%s\n", __FUNCTION__);
@@ -71,7 +76,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
struct mm_struct *mm = spu->mm;
- u64 esid, vsid;
+ u64 esid, vsid, llp;
pr_debug("%s\n", __FUNCTION__);
@@ -91,9 +96,14 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
}
esid = (ea & ESID_MASK) | SLB_ESID_V;
- vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
+#ifdef CONFIG_HUGETLB_PAGE
if (in_hugepage_area(mm->context, ea))
- vsid |= SLB_VSID_L;
+ llp = mmu_psize_defs[mmu_huge_psize].sllp;
+ else
+#endif
+ llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+ SLB_VSID_USER | llp;
out_be64(&priv2->slb_index_W, spu->slb_replace);
out_be64(&priv2->slb_vsid_RW, vsid);
@@ -130,57 +140,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
spu->dar = ea;
spu->dsisr = dsisr;
mb();
- if (spu->stop_callback)
- spu->stop_callback(spu);
- return 0;
-}
-
-static int __spu_trap_mailbox(struct spu *spu)
-{
- if (spu->ibox_callback)
- spu->ibox_callback(spu);
-
- /* atomically disable SPU mailbox interrupts */
- spin_lock(&spu->register_lock);
- spu_int_mask_and(spu, 2, ~0x1);
- spin_unlock(&spu->register_lock);
- return 0;
-}
-
-static int __spu_trap_stop(struct spu *spu)
-{
- pr_debug("%s\n", __FUNCTION__);
- spu->stop_code = in_be32(&spu->problem->spu_status_R);
- if (spu->stop_callback)
- spu->stop_callback(spu);
- return 0;
-}
-
-static int __spu_trap_halt(struct spu *spu)
-{
- pr_debug("%s\n", __FUNCTION__);
- spu->stop_code = in_be32(&spu->problem->spu_status_R);
- if (spu->stop_callback)
- spu->stop_callback(spu);
- return 0;
-}
-
-static int __spu_trap_tag_group(struct spu *spu)
-{
- pr_debug("%s\n", __FUNCTION__);
- spu->mfc_callback(spu);
- return 0;
-}
-
-static int __spu_trap_spubox(struct spu *spu)
-{
- if (spu->wbox_callback)
- spu->wbox_callback(spu);
-
- /* atomically disable SPU mailbox interrupts */
- spin_lock(&spu->register_lock);
- spu_int_mask_and(spu, 2, ~0x10);
- spin_unlock(&spu->register_lock);
+ spu->stop_callback(spu);
return 0;
}
@@ -191,8 +151,7 @@ spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
spu = data;
spu->class_0_pending = 1;
- if (spu->stop_callback)
- spu->stop_callback(spu);
+ spu->stop_callback(spu);
return IRQ_HANDLED;
}
@@ -270,29 +229,38 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
unsigned long mask;
spu = data;
+ spin_lock(&spu->register_lock);
stat = spu_int_stat_get(spu, 2);
mask = spu_int_mask_get(spu, 2);
+ /* ignore interrupts we're not waiting for */
+ stat &= mask;
+ /*
+ * mailbox interrupts (0x1 and 0x10) are level triggered.
+ * mask them now before acknowledging.
+ */
+ if (stat & 0x11)
+ spu_int_mask_and(spu, 2, ~(stat & 0x11));
+ /* acknowledge all interrupts before the callbacks */
+ spu_int_stat_clear(spu, 2, stat);
+ spin_unlock(&spu->register_lock);
pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
- stat &= mask;
-
if (stat & 1) /* PPC core mailbox */
- __spu_trap_mailbox(spu);
+ spu->ibox_callback(spu);
if (stat & 2) /* SPU stop-and-signal */
- __spu_trap_stop(spu);
+ spu->stop_callback(spu);
if (stat & 4) /* SPU halted */
- __spu_trap_halt(spu);
+ spu->stop_callback(spu);
if (stat & 8) /* DMA tag group complete */
- __spu_trap_tag_group(spu);
+ spu->mfc_callback(spu);
if (stat & 0x10) /* SPU mailbox threshold */
- __spu_trap_spubox(spu);
+ spu->wbox_callback(spu);
- spu_int_stat_clear(spu, 2, stat);
return stat ? IRQ_HANDLED : IRQ_NONE;
}
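
[Editor's note] The reworked class 2 handler above follows a fixed order: filter the status by the mask, mask the level-triggered mailbox bits, acknowledge everything, and only then run the callbacks. A standalone sketch of that ordering with faked registers (values chosen for illustration):

    #include <stdio.h>

    static unsigned long fake_stat = 0x13;  /* ibox + stop-and-signal + wbox pending */
    static unsigned long fake_mask = 0x11;  /* only the mailbox interrupts are wanted */

    int main(void)
    {
            unsigned long stat = fake_stat & fake_mask;     /* ignore unrequested sources */

            if (stat & 0x11)                        /* mailboxes are level triggered */
                    fake_mask &= ~(stat & 0x11);    /* mask before acknowledging */
            fake_stat &= ~stat;                     /* acknowledge */

            if (stat & 0x01)
                    printf("ibox callback\n");
            if (stat & 0x10)
                    printf("wbox callback\n");
            printf("mask now 0x%02lx, stat now 0x%02lx\n", fake_mask, fake_stat);
            return 0;
    }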
@@ -512,14 +480,6 @@ int spu_irq_class_1_bottom(struct spu *spu)
return ret;
}
-void spu_irq_setaffinity(struct spu *spu, int cpu)
-{
- u64 target = iic_get_target_id(cpu);
- u64 route = target << 48 | target << 32 | target << 16;
- spu_int_route_set(spu, route);
-}
-EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
-
static int __init find_spu_node_id(struct device_node *spe)
{
unsigned int *id;
@@ -649,6 +609,46 @@ out:
return ret;
}
+struct sysdev_class spu_sysdev_class = {
+ set_kset_name("spu")
+};
+
+static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
+{
+ struct spu *spu = container_of(sysdev, struct spu, sysdev);
+ return sprintf(buf, "%d\n", spu->isrc);
+
+}
+static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);
+
+extern int attach_sysdev_to_node(struct sys_device *dev, int nid);
+
+static int spu_create_sysdev(struct spu *spu)
+{
+ int ret;
+
+ spu->sysdev.id = spu->number;
+ spu->sysdev.cls = &spu_sysdev_class;
+ ret = sysdev_register(&spu->sysdev);
+ if (ret) {
+ printk(KERN_ERR "Can't register SPU %d with sysfs\n",
+ spu->number);
+ return ret;
+ }
+
+ sysdev_create_file(&spu->sysdev, &attr_isrc);
+ sysfs_add_device_to_node(&spu->sysdev, spu->nid);
+
+ return 0;
+}
+
+static void spu_destroy_sysdev(struct spu *spu)
+{
+ sysdev_remove_file(&spu->sysdev, &attr_isrc);
+ sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
+ sysdev_unregister(&spu->sysdev);
+}
+
static int __init create_spu(struct device_node *spe)
{
struct spu *spu;
@@ -656,7 +656,7 @@ static int __init create_spu(struct device_node *spe)
static int number;
ret = -ENOMEM;
- spu = kmalloc(sizeof (*spu), GFP_KERNEL);
+ spu = kzalloc(sizeof (*spu), GFP_KERNEL);
if (!spu)
goto out;
@@ -668,33 +668,20 @@ static int __init create_spu(struct device_node *spe)
spu->nid = of_node_to_nid(spe);
if (spu->nid == -1)
spu->nid = 0;
-
- spu->stop_code = 0;
- spu->slb_replace = 0;
- spu->mm = NULL;
- spu->ctx = NULL;
- spu->rq = NULL;
- spu->pid = 0;
- spu->class_0_pending = 0;
- spu->flags = 0UL;
- spu->dar = 0UL;
- spu->dsisr = 0UL;
spin_lock_init(&spu->register_lock);
-
spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
spu_mfc_sr1_set(spu, 0x33);
-
- spu->ibox_callback = NULL;
- spu->wbox_callback = NULL;
- spu->stop_callback = NULL;
- spu->mfc_callback = NULL;
-
mutex_lock(&spu_mutex);
+
spu->number = number++;
ret = spu_request_irqs(spu);
if (ret)
goto out_unmap;
+ ret = spu_create_sysdev(spu);
+ if (ret)
+ goto out_free_irqs;
+
list_add(&spu->list, &spu_list);
mutex_unlock(&spu_mutex);
@@ -703,6 +690,9 @@ static int __init create_spu(struct device_node *spe)
spu->problem, spu->priv1, spu->priv2, spu->number);
goto out;
+out_free_irqs:
+ spu_free_irqs(spu);
+
out_unmap:
mutex_unlock(&spu_mutex);
spu_unmap(spu);
@@ -716,6 +706,7 @@ static void destroy_spu(struct spu *spu)
{
list_del_init(&spu->list);
+ spu_destroy_sysdev(spu);
spu_free_irqs(spu);
spu_unmap(spu);
kfree(spu);
@@ -728,6 +719,7 @@ static void cleanup_spu_base(void)
list_for_each_entry_safe(spu, tmp, &spu_list, list)
destroy_spu(spu);
mutex_unlock(&spu_mutex);
+ sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);
@@ -736,6 +728,11 @@ static int __init init_spu_base(void)
struct device_node *node;
int ret;
+ /* create sysdev class for spus */
+ ret = sysdev_class_register(&spu_sysdev_class);
+ if (ret)
+ return ret;
+
ret = -ENODEV;
for (node = of_find_node_by_type(NULL, "spe");
node; node = of_find_node_by_type(node, "spe")) {
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index b47fcc5ddb78..47ec3be3edcd 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -34,307 +34,19 @@
*/
void *spu_syscall_table[] = {
- [__NR_restart_syscall] sys_ni_syscall, /* sys_restart_syscall */
- [__NR_exit] sys_ni_syscall, /* sys_exit */
- [__NR_fork] sys_ni_syscall, /* ppc_fork */
- [__NR_read] sys_read,
- [__NR_write] sys_write,
- [__NR_open] sys_open,
- [__NR_close] sys_close,
- [__NR_waitpid] sys_waitpid,
- [__NR_creat] sys_creat,
- [__NR_link] sys_link,
- [__NR_unlink] sys_unlink,
- [__NR_execve] sys_ni_syscall, /* sys_execve */
- [__NR_chdir] sys_chdir,
- [__NR_time] sys_time,
- [__NR_mknod] sys_mknod,
- [__NR_chmod] sys_chmod,
- [__NR_lchown] sys_lchown,
- [__NR_break] sys_ni_syscall,
- [__NR_oldstat] sys_ni_syscall,
- [__NR_lseek] sys_lseek,
- [__NR_getpid] sys_getpid,
- [__NR_mount] sys_ni_syscall, /* sys_mount */
- [__NR_umount] sys_ni_syscall,
- [__NR_setuid] sys_setuid,
- [__NR_getuid] sys_getuid,
- [__NR_stime] sys_stime,
- [__NR_ptrace] sys_ni_syscall, /* sys_ptrace */
- [__NR_alarm] sys_alarm,
- [__NR_oldfstat] sys_ni_syscall,
- [__NR_pause] sys_ni_syscall, /* sys_pause */
- [__NR_utime] sys_ni_syscall, /* sys_utime */
- [__NR_stty] sys_ni_syscall,
- [__NR_gtty] sys_ni_syscall,
- [__NR_access] sys_access,
- [__NR_nice] sys_nice,
- [__NR_ftime] sys_ni_syscall,
- [__NR_sync] sys_sync,
- [__NR_kill] sys_kill,
- [__NR_rename] sys_rename,
- [__NR_mkdir] sys_mkdir,
- [__NR_rmdir] sys_rmdir,
- [__NR_dup] sys_dup,
- [__NR_pipe] sys_pipe,
- [__NR_times] sys_times,
- [__NR_prof] sys_ni_syscall,
- [__NR_brk] sys_brk,
- [__NR_setgid] sys_setgid,
- [__NR_getgid] sys_getgid,
- [__NR_signal] sys_ni_syscall, /* sys_signal */
- [__NR_geteuid] sys_geteuid,
- [__NR_getegid] sys_getegid,
- [__NR_acct] sys_ni_syscall, /* sys_acct */
- [__NR_umount2] sys_ni_syscall, /* sys_umount */
- [__NR_lock] sys_ni_syscall,
- [__NR_ioctl] sys_ioctl,
- [__NR_fcntl] sys_fcntl,
- [__NR_mpx] sys_ni_syscall,
- [__NR_setpgid] sys_setpgid,
- [__NR_ulimit] sys_ni_syscall,
- [__NR_oldolduname] sys_ni_syscall,
- [__NR_umask] sys_umask,
- [__NR_chroot] sys_chroot,
- [__NR_ustat] sys_ni_syscall, /* sys_ustat */
- [__NR_dup2] sys_dup2,
- [__NR_getppid] sys_getppid,
- [__NR_getpgrp] sys_getpgrp,
- [__NR_setsid] sys_setsid,
- [__NR_sigaction] sys_ni_syscall,
- [__NR_sgetmask] sys_sgetmask,
- [__NR_ssetmask] sys_ssetmask,
- [__NR_setreuid] sys_setreuid,
- [__NR_setregid] sys_setregid,
- [__NR_sigsuspend] sys_ni_syscall,
- [__NR_sigpending] sys_ni_syscall,
- [__NR_sethostname] sys_sethostname,
- [__NR_setrlimit] sys_setrlimit,
- [__NR_getrlimit] sys_ni_syscall,
- [__NR_getrusage] sys_getrusage,
- [__NR_gettimeofday] sys_gettimeofday,
- [__NR_settimeofday] sys_settimeofday,
- [__NR_getgroups] sys_getgroups,
- [__NR_setgroups] sys_setgroups,
- [__NR_select] sys_ni_syscall,
- [__NR_symlink] sys_symlink,
- [__NR_oldlstat] sys_ni_syscall,
- [__NR_readlink] sys_readlink,
- [__NR_uselib] sys_ni_syscall, /* sys_uselib */
- [__NR_swapon] sys_ni_syscall, /* sys_swapon */
- [__NR_reboot] sys_ni_syscall, /* sys_reboot */
- [__NR_readdir] sys_ni_syscall,
- [__NR_mmap] sys_mmap,
- [__NR_munmap] sys_munmap,
- [__NR_truncate] sys_truncate,
- [__NR_ftruncate] sys_ftruncate,
- [__NR_fchmod] sys_fchmod,
- [__NR_fchown] sys_fchown,
- [__NR_getpriority] sys_getpriority,
- [__NR_setpriority] sys_setpriority,
- [__NR_profil] sys_ni_syscall,
- [__NR_statfs] sys_ni_syscall, /* sys_statfs */
- [__NR_fstatfs] sys_ni_syscall, /* sys_fstatfs */
- [__NR_ioperm] sys_ni_syscall,
- [__NR_socketcall] sys_socketcall,
- [__NR_syslog] sys_syslog,
- [__NR_setitimer] sys_setitimer,
- [__NR_getitimer] sys_getitimer,
- [__NR_stat] sys_newstat,
- [__NR_lstat] sys_newlstat,
- [__NR_fstat] sys_newfstat,
- [__NR_olduname] sys_ni_syscall,
- [__NR_iopl] sys_ni_syscall,
- [__NR_vhangup] sys_vhangup,
- [__NR_idle] sys_ni_syscall,
- [__NR_vm86] sys_ni_syscall,
- [__NR_wait4] sys_wait4,
- [__NR_swapoff] sys_ni_syscall, /* sys_swapoff */
- [__NR_sysinfo] sys_sysinfo,
- [__NR_ipc] sys_ni_syscall, /* sys_ipc */
- [__NR_fsync] sys_fsync,
- [__NR_sigreturn] sys_ni_syscall,
- [__NR_clone] sys_ni_syscall, /* ppc_clone */
- [__NR_setdomainname] sys_setdomainname,
- [__NR_uname] ppc_newuname,
- [__NR_modify_ldt] sys_ni_syscall,
- [__NR_adjtimex] sys_adjtimex,
- [__NR_mprotect] sys_mprotect,
- [__NR_sigprocmask] sys_ni_syscall,
- [__NR_create_module] sys_ni_syscall,
- [__NR_init_module] sys_ni_syscall, /* sys_init_module */
- [__NR_delete_module] sys_ni_syscall, /* sys_delete_module */
- [__NR_get_kernel_syms] sys_ni_syscall,
- [__NR_quotactl] sys_ni_syscall, /* sys_quotactl */
- [__NR_getpgid] sys_getpgid,
- [__NR_fchdir] sys_fchdir,
- [__NR_bdflush] sys_bdflush,
- [__NR_sysfs] sys_ni_syscall, /* sys_sysfs */
- [__NR_personality] ppc64_personality,
- [__NR_afs_syscall] sys_ni_syscall,
- [__NR_setfsuid] sys_setfsuid,
- [__NR_setfsgid] sys_setfsgid,
- [__NR__llseek] sys_llseek,
- [__NR_getdents] sys_getdents,
- [__NR__newselect] sys_select,
- [__NR_flock] sys_flock,
- [__NR_msync] sys_msync,
- [__NR_readv] sys_readv,
- [__NR_writev] sys_writev,
- [__NR_getsid] sys_getsid,
- [__NR_fdatasync] sys_fdatasync,
- [__NR__sysctl] sys_ni_syscall, /* sys_sysctl */
- [__NR_mlock] sys_mlock,
- [__NR_munlock] sys_munlock,
- [__NR_mlockall] sys_mlockall,
- [__NR_munlockall] sys_munlockall,
- [__NR_sched_setparam] sys_sched_setparam,
- [__NR_sched_getparam] sys_sched_getparam,
- [__NR_sched_setscheduler] sys_sched_setscheduler,
- [__NR_sched_getscheduler] sys_sched_getscheduler,
- [__NR_sched_yield] sys_sched_yield,
- [__NR_sched_get_priority_max] sys_sched_get_priority_max,
- [__NR_sched_get_priority_min] sys_sched_get_priority_min,
- [__NR_sched_rr_get_interval] sys_sched_rr_get_interval,
- [__NR_nanosleep] sys_nanosleep,
- [__NR_mremap] sys_mremap,
- [__NR_setresuid] sys_setresuid,
- [__NR_getresuid] sys_getresuid,
- [__NR_query_module] sys_ni_syscall,
- [__NR_poll] sys_poll,
- [__NR_nfsservctl] sys_ni_syscall, /* sys_nfsservctl */
- [__NR_setresgid] sys_setresgid,
- [__NR_getresgid] sys_getresgid,
- [__NR_prctl] sys_prctl,
- [__NR_rt_sigreturn] sys_ni_syscall, /* ppc64_rt_sigreturn */
- [__NR_rt_sigaction] sys_ni_syscall, /* sys_rt_sigaction */
- [__NR_rt_sigprocmask] sys_ni_syscall, /* sys_rt_sigprocmask */
- [__NR_rt_sigpending] sys_ni_syscall, /* sys_rt_sigpending */
- [__NR_rt_sigtimedwait] sys_ni_syscall, /* sys_rt_sigtimedwait */
- [__NR_rt_sigqueueinfo] sys_ni_syscall, /* sys_rt_sigqueueinfo */
- [__NR_rt_sigsuspend] sys_ni_syscall, /* sys_rt_sigsuspend */
- [__NR_pread64] sys_pread64,
- [__NR_pwrite64] sys_pwrite64,
- [__NR_chown] sys_chown,
- [__NR_getcwd] sys_getcwd,
- [__NR_capget] sys_capget,
- [__NR_capset] sys_capset,
- [__NR_sigaltstack] sys_ni_syscall, /* sys_sigaltstack */
- [__NR_sendfile] sys_sendfile64,
- [__NR_getpmsg] sys_ni_syscall,
- [__NR_putpmsg] sys_ni_syscall,
- [__NR_vfork] sys_ni_syscall, /* ppc_vfork */
- [__NR_ugetrlimit] sys_getrlimit,
- [__NR_readahead] sys_readahead,
- [192] sys_ni_syscall,
- [193] sys_ni_syscall,
- [194] sys_ni_syscall,
- [195] sys_ni_syscall,
- [196] sys_ni_syscall,
- [197] sys_ni_syscall,
- [__NR_pciconfig_read] sys_ni_syscall, /* sys_pciconfig_read */
- [__NR_pciconfig_write] sys_ni_syscall, /* sys_pciconfig_write */
- [__NR_pciconfig_iobase] sys_ni_syscall, /* sys_pciconfig_iobase */
- [__NR_multiplexer] sys_ni_syscall,
- [__NR_getdents64] sys_getdents64,
- [__NR_pivot_root] sys_pivot_root,
- [204] sys_ni_syscall,
- [__NR_madvise] sys_madvise,
- [__NR_mincore] sys_mincore,
- [__NR_gettid] sys_gettid,
- [__NR_tkill] sys_tkill,
- [__NR_setxattr] sys_setxattr,
- [__NR_lsetxattr] sys_lsetxattr,
- [__NR_fsetxattr] sys_fsetxattr,
- [__NR_getxattr] sys_getxattr,
- [__NR_lgetxattr] sys_lgetxattr,
- [__NR_fgetxattr] sys_fgetxattr,
- [__NR_listxattr] sys_listxattr,
- [__NR_llistxattr] sys_llistxattr,
- [__NR_flistxattr] sys_flistxattr,
- [__NR_removexattr] sys_removexattr,
- [__NR_lremovexattr] sys_lremovexattr,
- [__NR_fremovexattr] sys_fremovexattr,
- [__NR_futex] sys_futex,
- [__NR_sched_setaffinity] sys_sched_setaffinity,
- [__NR_sched_getaffinity] sys_sched_getaffinity,
- [224] sys_ni_syscall,
- [__NR_tuxcall] sys_ni_syscall,
- [226] sys_ni_syscall,
- [__NR_io_setup] sys_io_setup,
- [__NR_io_destroy] sys_io_destroy,
- [__NR_io_getevents] sys_io_getevents,
- [__NR_io_submit] sys_io_submit,
- [__NR_io_cancel] sys_io_cancel,
- [__NR_set_tid_address] sys_ni_syscall, /* sys_set_tid_address */
- [__NR_fadvise64] sys_fadvise64,
- [__NR_exit_group] sys_ni_syscall, /* sys_exit_group */
- [__NR_lookup_dcookie] sys_ni_syscall, /* sys_lookup_dcookie */
- [__NR_epoll_create] sys_epoll_create,
- [__NR_epoll_ctl] sys_epoll_ctl,
- [__NR_epoll_wait] sys_epoll_wait,
- [__NR_remap_file_pages] sys_remap_file_pages,
- [__NR_timer_create] sys_timer_create,
- [__NR_timer_settime] sys_timer_settime,
- [__NR_timer_gettime] sys_timer_gettime,
- [__NR_timer_getoverrun] sys_timer_getoverrun,
- [__NR_timer_delete] sys_timer_delete,
- [__NR_clock_settime] sys_clock_settime,
- [__NR_clock_gettime] sys_clock_gettime,
- [__NR_clock_getres] sys_clock_getres,
- [__NR_clock_nanosleep] sys_clock_nanosleep,
- [__NR_swapcontext] sys_ni_syscall, /* ppc64_swapcontext */
- [__NR_tgkill] sys_tgkill,
- [__NR_utimes] sys_utimes,
- [__NR_statfs64] sys_statfs64,
- [__NR_fstatfs64] sys_fstatfs64,
- [254] sys_ni_syscall,
- [__NR_rtas] ppc_rtas,
- [256] sys_ni_syscall,
- [257] sys_ni_syscall,
- [258] sys_ni_syscall,
- [__NR_mbind] sys_ni_syscall, /* sys_mbind */
- [__NR_get_mempolicy] sys_ni_syscall, /* sys_get_mempolicy */
- [__NR_set_mempolicy] sys_ni_syscall, /* sys_set_mempolicy */
- [__NR_mq_open] sys_ni_syscall, /* sys_mq_open */
- [__NR_mq_unlink] sys_ni_syscall, /* sys_mq_unlink */
- [__NR_mq_timedsend] sys_ni_syscall, /* sys_mq_timedsend */
- [__NR_mq_timedreceive] sys_ni_syscall, /* sys_mq_timedreceive */
- [__NR_mq_notify] sys_ni_syscall, /* sys_mq_notify */
- [__NR_mq_getsetattr] sys_ni_syscall, /* sys_mq_getsetattr */
- [__NR_kexec_load] sys_ni_syscall, /* sys_kexec_load */
- [__NR_add_key] sys_ni_syscall, /* sys_add_key */
- [__NR_request_key] sys_ni_syscall, /* sys_request_key */
- [__NR_keyctl] sys_ni_syscall, /* sys_keyctl */
- [__NR_waitid] sys_ni_syscall, /* sys_waitid */
- [__NR_ioprio_set] sys_ni_syscall, /* sys_ioprio_set */
- [__NR_ioprio_get] sys_ni_syscall, /* sys_ioprio_get */
- [__NR_inotify_init] sys_ni_syscall, /* sys_inotify_init */
- [__NR_inotify_add_watch] sys_ni_syscall, /* sys_inotify_add_watch */
- [__NR_inotify_rm_watch] sys_ni_syscall, /* sys_inotify_rm_watch */
- [__NR_spu_run] sys_ni_syscall, /* sys_spu_run */
- [__NR_spu_create] sys_ni_syscall, /* sys_spu_create */
- [__NR_pselect6] sys_ni_syscall, /* sys_pselect */
- [__NR_ppoll] sys_ni_syscall, /* sys_ppoll */
- [__NR_unshare] sys_unshare,
- [__NR_splice] sys_splice,
- [__NR_tee] sys_tee,
- [__NR_vmsplice] sys_vmsplice,
- [__NR_openat] sys_openat,
- [__NR_mkdirat] sys_mkdirat,
- [__NR_mknodat] sys_mknodat,
- [__NR_fchownat] sys_fchownat,
- [__NR_futimesat] sys_futimesat,
- [__NR_newfstatat] sys_newfstatat,
- [__NR_unlinkat] sys_unlinkat,
- [__NR_renameat] sys_renameat,
- [__NR_linkat] sys_linkat,
- [__NR_symlinkat] sys_symlinkat,
- [__NR_readlinkat] sys_readlinkat,
- [__NR_fchmodat] sys_fchmodat,
- [__NR_faccessat] sys_faccessat,
- [__NR_get_robust_list] sys_get_robust_list,
- [__NR_set_robust_list] sys_set_robust_list,
+#define SYSCALL(func) sys_ni_syscall,
+#define COMPAT_SYS(func) sys_ni_syscall,
+#define PPC_SYS(func) sys_ni_syscall,
+#define OLDSYS(func) sys_ni_syscall,
+#define SYS32ONLY(func) sys_ni_syscall,
+#define SYSX(f, f3264, f32) sys_ni_syscall,
+
+#define SYSCALL_SPU(func) sys_##func,
+#define COMPAT_SYS_SPU(func) sys_##func,
+#define PPC_SYS_SPU(func) ppc_##func,
+#define SYSX_SPU(f, f3264, f32) f,
+
+#include <asm/systbl.h>
};
long spu_sys_callback(struct spu_syscall_block *s)
diff --git a/arch/powerpc/platforms/cell/spu_priv1.c b/arch/powerpc/platforms/cell/spu_priv1.c
deleted file mode 100644
index b2656421c7b5..000000000000
--- a/arch/powerpc/platforms/cell/spu_priv1.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * access to SPU privileged registers
- */
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/spu.h>
-
-void spu_int_mask_and(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
-
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
-}
-EXPORT_SYMBOL_GPL(spu_int_mask_and);
-
-void spu_int_mask_or(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
-
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
-}
-EXPORT_SYMBOL_GPL(spu_int_mask_or);
-
-void spu_int_mask_set(struct spu *spu, int class, u64 mask)
-{
- out_be64(&spu->priv1->int_mask_RW[class], mask);
-}
-EXPORT_SYMBOL_GPL(spu_int_mask_set);
-
-u64 spu_int_mask_get(struct spu *spu, int class)
-{
- return in_be64(&spu->priv1->int_mask_RW[class]);
-}
-EXPORT_SYMBOL_GPL(spu_int_mask_get);
-
-void spu_int_stat_clear(struct spu *spu, int class, u64 stat)
-{
- out_be64(&spu->priv1->int_stat_RW[class], stat);
-}
-EXPORT_SYMBOL_GPL(spu_int_stat_clear);
-
-u64 spu_int_stat_get(struct spu *spu, int class)
-{
- return in_be64(&spu->priv1->int_stat_RW[class]);
-}
-EXPORT_SYMBOL_GPL(spu_int_stat_get);
-
-void spu_int_route_set(struct spu *spu, u64 route)
-{
- out_be64(&spu->priv1->int_route_RW, route);
-}
-EXPORT_SYMBOL_GPL(spu_int_route_set);
-
-u64 spu_mfc_dar_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_dar_RW);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_dar_get);
-
-u64 spu_mfc_dsisr_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_dsisr_RW);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_dsisr_get);
-
-void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr)
-{
- out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_dsisr_set);
-
-void spu_mfc_sdr_set(struct spu *spu, u64 sdr)
-{
- out_be64(&spu->priv1->mfc_sdr_RW, sdr);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_sdr_set);
-
-void spu_mfc_sr1_set(struct spu *spu, u64 sr1)
-{
- out_be64(&spu->priv1->mfc_sr1_RW, sr1);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_sr1_set);
-
-u64 spu_mfc_sr1_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_sr1_RW);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_sr1_get);
-
-void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
-{
- out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_set);
-
-u64 spu_mfc_tclass_id_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_tclass_id_RW);
-}
-EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_get);
-
-void spu_tlb_invalidate(struct spu *spu)
-{
- out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
-}
-EXPORT_SYMBOL_GPL(spu_tlb_invalidate);
-
-void spu_resource_allocation_groupID_set(struct spu *spu, u64 id)
-{
- out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
-}
-EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_set);
-
-u64 spu_resource_allocation_groupID_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->resource_allocation_groupID_RW);
-}
-EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_get);
-
-void spu_resource_allocation_enable_set(struct spu *spu, u64 enable)
-{
- out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
-}
-EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_set);
-
-u64 spu_resource_allocation_enable_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->resource_allocation_enable_RW);
-}
-EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_get);
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
new file mode 100644
index 000000000000..71b69f0a1a48
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -0,0 +1,159 @@
+/*
+ * spu hypervisor abstraction for direct hardware access.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+
+#include "interrupt.h"
+
+static void int_mask_and(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+
+ old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+ out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+}
+
+static void int_mask_or(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+
+ old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+ out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+}
+
+static void int_mask_set(struct spu *spu, int class, u64 mask)
+{
+ out_be64(&spu->priv1->int_mask_RW[class], mask);
+}
+
+static u64 int_mask_get(struct spu *spu, int class)
+{
+ return in_be64(&spu->priv1->int_mask_RW[class]);
+}
+
+static void int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+ out_be64(&spu->priv1->int_stat_RW[class], stat);
+}
+
+static u64 int_stat_get(struct spu *spu, int class)
+{
+ return in_be64(&spu->priv1->int_stat_RW[class]);
+}
+
+static void cpu_affinity_set(struct spu *spu, int cpu)
+{
+ u64 target = iic_get_target_id(cpu);
+ u64 route = target << 48 | target << 32 | target << 16;
+ out_be64(&spu->priv1->int_route_RW, route);
+}
+
+static u64 mfc_dar_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_dar_RW);
+}
+
+static u64 mfc_dsisr_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_dsisr_RW);
+}
+
+static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+ out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+}
+
+static void mfc_sdr_set(struct spu *spu, u64 sdr)
+{
+ out_be64(&spu->priv1->mfc_sdr_RW, sdr);
+}
+
+static void mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+ out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+}
+
+static u64 mfc_sr1_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_sr1_RW);
+}
+
+static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+ out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+}
+
+static u64 mfc_tclass_id_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_tclass_id_RW);
+}
+
+static void tlb_invalidate(struct spu *spu)
+{
+ out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+}
+
+static void resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+ out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+}
+
+static u64 resource_allocation_groupID_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+}
+
+static void resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+ out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+}
+
+static u64 resource_allocation_enable_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->resource_allocation_enable_RW);
+}
+
+const struct spu_priv1_ops spu_priv1_mmio_ops =
+{
+ .int_mask_and = int_mask_and,
+ .int_mask_or = int_mask_or,
+ .int_mask_set = int_mask_set,
+ .int_mask_get = int_mask_get,
+ .int_stat_clear = int_stat_clear,
+ .int_stat_get = int_stat_get,
+ .cpu_affinity_set = cpu_affinity_set,
+ .mfc_dar_get = mfc_dar_get,
+ .mfc_dsisr_get = mfc_dsisr_get,
+ .mfc_dsisr_set = mfc_dsisr_set,
+ .mfc_sdr_set = mfc_sdr_set,
+ .mfc_sr1_set = mfc_sr1_set,
+ .mfc_sr1_get = mfc_sr1_get,
+ .mfc_tclass_id_set = mfc_tclass_id_set,
+ .mfc_tclass_id_get = mfc_tclass_id_get,
+ .tlb_invalidate = tlb_invalidate,
+ .resource_allocation_groupID_set = resource_allocation_groupID_set,
+ .resource_allocation_groupID_get = resource_allocation_groupID_get,
+ .resource_allocation_enable_set = resource_allocation_enable_set,
+ .resource_allocation_enable_get = resource_allocation_enable_get,
+};
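
For context: the ops table above takes over from the exported spu_* accessors removed from spu.c, so that other backends (for example a hypervisor-based one) can be plugged in without touching callers. Callers are assumed to go through thin static inline wrappers in <asm/spu_priv1.h> that dispatch via the currently selected ops table; the wrapper naming matches the spu_cpu_affinity_set() call visible in the sched.c hunk further down, but the header contents below are only a sketch under that assumption, not part of this patch.

	/* Hypothetical sketch of the dispatch layer in <asm/spu_priv1.h>. */
	extern const struct spu_priv1_ops *spu_priv1_ops;

	static inline void spu_int_mask_set(struct spu *spu, int class, u64 mask)
	{
		/* forward to whichever backend (MMIO, hypervisor, ...) is active */
		spu_priv1_ops->int_mask_set(spu, class, mask);
	}

	static inline void spu_cpu_affinity_set(struct spu *spu, int cpu)
	{
		spu_priv1_ops->cpu_affinity_set(spu, cpu);
	}
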
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index a7cddf40e3d9..bb5dc634272c 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,5 +1,7 @@
+obj-y += switch.o
+
obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o switch.o syscalls.o
+spufs-y += inode.o file.o context.o syscalls.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o
# Rules to build switch.o with the help of SPU tool chain
@@ -8,11 +10,14 @@ SPU_CC := $(SPU_CROSS)gcc
SPU_AS := $(SPU_CROSS)gcc
SPU_LD := $(SPU_CROSS)ld
SPU_OBJCOPY := $(SPU_CROSS)objcopy
-SPU_CFLAGS := -O2 -Wall -I$(srctree)/include -I$(objtree)/include2
-SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include -I$(objtree)/include2
+SPU_CFLAGS := -O2 -Wall -I$(srctree)/include \
+ -I$(objtree)/include2 -D__KERNEL__
+SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include \
+ -I$(objtree)/include2 -D__KERNEL__
SPU_LDFLAGS := -N -Ttext=0x0
$(obj)/switch.o: $(obj)/spu_save_dump.h $(obj)/spu_restore_dump.h
+clean-files := spu_save_dump.h spu_restore_dump.h
# Compile SPU files
cmd_spu_cc = $(SPU_CC) $(SPU_CFLAGS) -c -o $@ $<
@@ -45,7 +50,8 @@ cmd_hexdump = ( \
echo " * Hex-dump auto generated from $*.c." ; \
echo " * Do not edit!" ; \
echo " */" ; \
- echo "static unsigned int $*_code[] __page_aligned = {" ; \
+ echo "static unsigned int $*_code[] " \
+ "__attribute__((__aligned__(128))) = {" ; \
hexdump -v -e '"0x" 4/1 "%02x" "," "\n"' $< ; \
echo "};" ; \
) > $@
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 8bb33abfad17..36439c5e9f2d 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -30,7 +30,7 @@
struct spu_context *alloc_spu_context(void)
{
struct spu_context *ctx;
- ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
goto out;
/* Binding to physical processor deferred
@@ -48,17 +48,7 @@ struct spu_context *alloc_spu_context(void)
init_waitqueue_head(&ctx->wbox_wq);
init_waitqueue_head(&ctx->stop_wq);
init_waitqueue_head(&ctx->mfc_wq);
- ctx->ibox_fasync = NULL;
- ctx->wbox_fasync = NULL;
- ctx->mfc_fasync = NULL;
- ctx->mfc = NULL;
- ctx->tagwait = 0;
ctx->state = SPU_STATE_SAVED;
- ctx->local_store = NULL;
- ctx->cntl = NULL;
- ctx->signal1 = NULL;
- ctx->signal2 = NULL;
- ctx->spu = NULL;
ctx->ops = &spu_backing_ops;
ctx->owner = get_task_mm(current);
goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 366185e92667..80c02660e617 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -825,6 +825,55 @@ DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
spufs_signal2_type_set, "%llu");
#ifdef CONFIG_SPUFS_MMAP
+static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ return spufs_ps_nopage(vma, address, type, 0x0000);
+}
+
+static struct vm_operations_struct spufs_mss_mmap_vmops = {
+ .nopage = spufs_mss_mmap_nopage,
+};
+
+/*
+ * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
+ * Mapping this area requires that the application have CAP_SYS_RAWIO,
+ * as these registers require special care when reading or writing.
+ */
+static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+ | _PAGE_NO_CACHE);
+
+ vma->vm_ops = &spufs_mss_mmap_vmops;
+ return 0;
+}
+#endif
+
+static int spufs_mss_open(struct inode *inode, struct file *file)
+{
+ struct spufs_inode_info *i = SPUFS_I(inode);
+
+ file->private_data = i->i_ctx;
+ return nonseekable_open(inode, file);
+}
+
+static struct file_operations spufs_mss_fops = {
+ .open = spufs_mss_open,
+#ifdef CONFIG_SPUFS_MMAP
+ .mmap = spufs_mss_mmap,
+#endif
+};
+
+
+#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
@@ -1279,6 +1328,22 @@ static u64 spufs_srr0_get(void *data)
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
"%llx\n")
+static u64 spufs_id_get(void *data)
+{
+ struct spu_context *ctx = data;
+ u64 num;
+
+ spu_acquire(ctx);
+ if (ctx->state == SPU_STATE_RUNNABLE)
+ num = ctx->spu->number;
+ else
+ num = (unsigned int)-1;
+ spu_release(ctx);
+
+ return num;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, 0, "0x%llx\n")
+
struct tree_descr spufs_dir_contents[] = {
{ "mem", &spufs_mem_fops, 0666, },
{ "regs", &spufs_regs_fops, 0666, },
@@ -1292,6 +1357,7 @@ struct tree_descr spufs_dir_contents[] = {
{ "signal2", &spufs_signal2_fops, 0666, },
{ "signal1_type", &spufs_signal1_type, 0666, },
{ "signal2_type", &spufs_signal2_type, 0666, },
+ { "mss", &spufs_mss_fops, 0666, },
{ "mfc", &spufs_mfc_fops, 0666, },
{ "cntl", &spufs_cntl_fops, 0666, },
{ "npc", &spufs_npc_ops, 0666, },
@@ -1301,5 +1367,6 @@ struct tree_descr spufs_dir_contents[] = {
{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
{ "event_mask", &spufs_event_mask_ops, 0666, },
{ "srr0", &spufs_srr0_ops, 0666, },
+ { "phys-id", &spufs_id_ops, 0666, },
{},
};
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index a13a8b5a014d..ede2cac46b6d 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -32,6 +32,7 @@
#include <asm/io.h>
#include <asm/spu.h>
+#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index d9554199afa7..7b4572805db9 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -157,20 +157,12 @@ static void spufs_prune_dir(struct dentry *dir)
mutex_unlock(&dir->d_inode->i_mutex);
}
+/* Caller must hold root->i_mutex */
static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
{
- struct spu_context *ctx;
-
/* remove all entries */
- mutex_lock(&root->i_mutex);
spufs_prune_dir(dir_dentry);
- mutex_unlock(&root->i_mutex);
-
- /* We have to give up the mm_struct */
- ctx = SPUFS_I(dir_dentry->d_inode)->i_ctx;
- spu_forget(ctx);
- /* XXX Do we need to hold i_mutex here ? */
return simple_rmdir(root, dir_dentry);
}
@@ -199,16 +191,23 @@ out:
static int spufs_dir_close(struct inode *inode, struct file *file)
{
+ struct spu_context *ctx;
struct inode *dir;
struct dentry *dentry;
int ret;
dentry = file->f_dentry;
dir = dentry->d_parent->d_inode;
+ ctx = SPUFS_I(dentry->d_inode)->i_ctx;
+ mutex_lock(&dir->i_mutex);
ret = spufs_rmdir(dir, dentry);
+ mutex_unlock(&dir->i_mutex);
WARN_ON(ret);
+ /* We have to give up the mm_struct */
+ spu_forget(ctx);
+
return dcache_dir_close(inode, file);
}
@@ -305,6 +304,10 @@ long spufs_create_thread(struct nameidata *nd,
nd->dentry != nd->dentry->d_sb->s_root)
goto out;
+ /* all flags are reserved */
+ if (flags)
+ goto out;
+
dentry = lookup_create(nd, 1);
ret = PTR_ERR(dentry);
if (IS_ERR(dentry))
@@ -324,8 +327,13 @@ long spufs_create_thread(struct nameidata *nd,
* in error path of *_open().
*/
ret = spufs_context_open(dget(dentry), mntget(nd->mnt));
- if (ret < 0)
- spufs_rmdir(nd->dentry->d_inode, dentry);
+ if (ret < 0) {
+ WARN_ON(spufs_rmdir(nd->dentry->d_inode, dentry));
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
+ spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
+ dput(dentry);
+ goto out;
+ }
out_dput:
dput(dentry);
@@ -428,11 +436,11 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return spufs_create_root(sb, data);
}
-static struct super_block *
+static int
spufs_get_sb(struct file_system_type *fstype, int flags,
- const char *name, void *data)
+ const char *name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fstype, flags, data, spufs_fill_super);
+ return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
}
static struct file_system_type spufs_type = {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index bf652cd77000..3dcc5d8d66b9 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -43,6 +43,7 @@
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
+#include <asm/spu_priv1.h>
#include "spufs.h"
#define SPU_MIN_TIMESLICE (100 * HZ / 1000)
@@ -363,7 +364,7 @@ int spu_activate(struct spu_context *ctx, u64 flags)
* We're likely to wait for interrupts on the same
* CPU that we are now on, so send them here.
*/
- spu_irq_setaffinity(spu, raw_smp_processor_id());
+ spu_cpu_affinity_set(spu, raw_smp_processor_id());
put_active_spu(spu);
return 0;
}
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
index 1b2355ff7036..15183d209b58 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
@@ -3,229 +3,901 @@
* Hex-dump auto generated from spu_restore.c.
* Do not edit!
*/
-static unsigned int spu_restore_code[] __page_aligned = {
-0x40800000, 0x409ff801, 0x24000080, 0x24fd8081,
-0x1cd80081, 0x33001180, 0x42030003, 0x33800284,
-0x1c010204, 0x40200000, 0x40200000, 0x40200000,
-0x34000190, 0x34004191, 0x34008192, 0x3400c193,
-0x141fc205, 0x23fffd84, 0x1c100183, 0x217ffa85,
-0x3080a000, 0x3080a201, 0x3080a402, 0x3080a603,
-0x3080a804, 0x3080aa05, 0x3080ac06, 0x3080ae07,
-0x3080b008, 0x3080b209, 0x3080b40a, 0x3080b60b,
-0x3080b80c, 0x3080ba0d, 0x3080bc0e, 0x3080be0f,
-0x00003ffc, 0x00000000, 0x00000000, 0x00000000,
-0x01a00182, 0x3ec00083, 0xb0a14103, 0x01a00204,
-0x3ec10082, 0x4202800e, 0x04000703, 0xb0a14202,
-0x21a00803, 0x3fbf028d, 0x3f20068d, 0x3fbe0682,
-0x3fe30102, 0x21a00882, 0x3f82028f, 0x3fe3078f,
-0x3fbf0784, 0x3f200204, 0x3fbe0204, 0x3fe30204,
-0x04000203, 0x21a00903, 0x40848002, 0x21a00982,
-0x40800003, 0x21a00a03, 0x40802002, 0x21a00a82,
-0x21a00083, 0x40800082, 0x21a00b02, 0x10002818,
-0x40a80002, 0x32800007, 0x4207000c, 0x18008208,
-0x40a0000b, 0x4080020a, 0x40800709, 0x00200000,
-0x42070002, 0x3ac30384, 0x1cffc489, 0x00200000,
-0x18008383, 0x38830382, 0x4cffc486, 0x3ac28185,
-0xb0408584, 0x28830382, 0x1c020387, 0x38828182,
-0xb0408405, 0x1802c408, 0x28828182, 0x217ff886,
-0x04000583, 0x21a00803, 0x3fbe0682, 0x3fe30102,
-0x04000106, 0x21a00886, 0x04000603, 0x21a00903,
-0x40803c02, 0x21a00982, 0x40800003, 0x04000184,
-0x21a00a04, 0x40802202, 0x21a00a82, 0x42028005,
-0x34208702, 0x21002282, 0x21a00804, 0x21a00886,
-0x3fbf0782, 0x3f200102, 0x3fbe0102, 0x3fe30102,
-0x21a00902, 0x40804003, 0x21a00983, 0x21a00a04,
-0x40805a02, 0x21a00a82, 0x40800083, 0x21a00b83,
-0x01a00c02, 0x01a00d83, 0x3420c282, 0x21a00e02,
-0x34210283, 0x21a00f03, 0x34200284, 0x77400200,
-0x3421c282, 0x21a00702, 0x34218283, 0x21a00083,
-0x34214282, 0x21a00b02, 0x4200480c, 0x00200000,
-0x1c010286, 0x34220284, 0x34220302, 0x0f608203,
-0x5c024204, 0x3b81810b, 0x42013c02, 0x00200000,
-0x18008185, 0x38808183, 0x3b814182, 0x21004e84,
-0x4020007f, 0x35000100, 0x000004e0, 0x000002a0,
-0x000002e8, 0x00000428, 0x00000360, 0x000002e8,
-0x000004a0, 0x00000468, 0x000003c8, 0x00000360,
-0x409ffe02, 0x30801203, 0x40800204, 0x3ec40085,
-0x10009c09, 0x3ac10606, 0xb060c105, 0x4020007f,
-0x4020007f, 0x20801203, 0x38810602, 0xb0408586,
-0x28810602, 0x32004180, 0x34204702, 0x21a00382,
-0x4020007f, 0x327fdc80, 0x409ffe02, 0x30801203,
-0x40800204, 0x3ec40087, 0x40800405, 0x00200000,
-0x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a,
-0xb060c107, 0x20801203, 0x41004003, 0x38810602,
-0x4020007f, 0xb0408188, 0x4020007f, 0x28810602,
-0x41201002, 0x38814603, 0x10009c09, 0xb060c109,
-0x4020007f, 0x28814603, 0x41193f83, 0x38818602,
-0x60ffc003, 0xb040818a, 0x28818602, 0x32003080,
-0x409ffe02, 0x30801203, 0x40800204, 0x3ec40087,
-0x41201008, 0x10009c14, 0x40800405, 0x3ac10609,
-0x40800606, 0x3ac1460a, 0xb060c107, 0x3ac1860b,
-0x20801203, 0x38810602, 0xb0408409, 0x28810602,
-0x38814603, 0xb060c40a, 0x4020007f, 0x28814603,
-0x41193f83, 0x38818602, 0x60ffc003, 0xb040818b,
-0x28818602, 0x32002380, 0x409ffe02, 0x30801204,
-0x40800205, 0x3ec40083, 0x40800406, 0x3ac14607,
-0x3ac18608, 0xb0810103, 0x41004002, 0x20801204,
-0x4020007f, 0x38814603, 0x10009c0b, 0xb060c107,
-0x4020007f, 0x4020007f, 0x28814603, 0x38818602,
-0x4020007f, 0x4020007f, 0xb0408588, 0x28818602,
-0x4020007f, 0x32001780, 0x409ffe02, 0x1000640e,
-0x40800204, 0x30801203, 0x40800405, 0x3ec40087,
-0x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a,
-0xb060c107, 0x20801203, 0x413d8003, 0x38810602,
-0x4020007f, 0x327fd780, 0x409ffe02, 0x10007f0c,
-0x40800205, 0x30801204, 0x40800406, 0x3ec40083,
-0x3ac14607, 0x3ac18608, 0xb0810103, 0x413d8002,
-0x20801204, 0x38814603, 0x4020007f, 0x327feb80,
-0x409ffe02, 0x30801203, 0x40800204, 0x3ec40087,
-0x40800405, 0x1000650a, 0x40800606, 0x3ac10608,
-0x3ac14609, 0x3ac1860a, 0xb060c107, 0x20801203,
-0x38810602, 0xb0408588, 0x4020007f, 0x327fc980,
-0x00400000, 0x40800003, 0x4020007f, 0x35000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
+static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
+0x40800000,
+0x409ff801,
+0x24000080,
+0x24fd8081,
+0x1cd80081,
+0x33001180,
+0x42030003,
+0x33800284,
+0x1c010204,
+0x40200000,
+0x40200000,
+0x40200000,
+0x34000190,
+0x34004191,
+0x34008192,
+0x3400c193,
+0x141fc205,
+0x23fffd84,
+0x1c100183,
+0x217ffa85,
+0x3080a000,
+0x3080a201,
+0x3080a402,
+0x3080a603,
+0x3080a804,
+0x3080aa05,
+0x3080ac06,
+0x3080ae07,
+0x3080b008,
+0x3080b209,
+0x3080b40a,
+0x3080b60b,
+0x3080b80c,
+0x3080ba0d,
+0x3080bc0e,
+0x3080be0f,
+0x00003ffc,
+0x00000000,
+0x00000000,
+0x00000000,
+0x01a00182,
+0x3ec00083,
+0xb0a14103,
+0x01a00204,
+0x3ec10082,
+0x4202800e,
+0x04000703,
+0xb0a14202,
+0x21a00803,
+0x3fbf028d,
+0x3f20068d,
+0x3fbe0682,
+0x3fe30102,
+0x21a00882,
+0x3f82028f,
+0x3fe3078f,
+0x3fbf0784,
+0x3f200204,
+0x3fbe0204,
+0x3fe30204,
+0x04000203,
+0x21a00903,
+0x40848002,
+0x21a00982,
+0x40800003,
+0x21a00a03,
+0x40802002,
+0x21a00a82,
+0x21a00083,
+0x40800082,
+0x21a00b02,
+0x10002818,
+0x42a00002,
+0x32800007,
+0x4207000c,
+0x18008208,
+0x40a0000b,
+0x4080020a,
+0x40800709,
+0x00200000,
+0x42070002,
+0x3ac30384,
+0x1cffc489,
+0x00200000,
+0x18008383,
+0x38830382,
+0x4cffc486,
+0x3ac28185,
+0xb0408584,
+0x28830382,
+0x1c020387,
+0x38828182,
+0xb0408405,
+0x1802c408,
+0x28828182,
+0x217ff886,
+0x04000583,
+0x21a00803,
+0x3fbe0682,
+0x3fe30102,
+0x04000106,
+0x21a00886,
+0x04000603,
+0x21a00903,
+0x40803c02,
+0x21a00982,
+0x40800003,
+0x04000184,
+0x21a00a04,
+0x40802202,
+0x21a00a82,
+0x42028005,
+0x34208702,
+0x21002282,
+0x21a00804,
+0x21a00886,
+0x3fbf0782,
+0x3f200102,
+0x3fbe0102,
+0x3fe30102,
+0x21a00902,
+0x40804003,
+0x21a00983,
+0x21a00a04,
+0x40805a02,
+0x21a00a82,
+0x40800083,
+0x21a00b83,
+0x01a00c02,
+0x01a00d83,
+0x3420c282,
+0x21a00e02,
+0x34210283,
+0x21a00f03,
+0x34200284,
+0x77400200,
+0x3421c282,
+0x21a00702,
+0x34218283,
+0x21a00083,
+0x34214282,
+0x21a00b02,
+0x4200480c,
+0x00200000,
+0x1c010286,
+0x34220284,
+0x34220302,
+0x0f608203,
+0x5c024204,
+0x3b81810b,
+0x42013c02,
+0x00200000,
+0x18008185,
+0x38808183,
+0x3b814182,
+0x21004e84,
+0x4020007f,
+0x35000100,
+0x000004e0,
+0x000002a0,
+0x000002e8,
+0x00000428,
+0x00000360,
+0x000002e8,
+0x000004a0,
+0x00000468,
+0x000003c8,
+0x00000360,
+0x409ffe02,
+0x30801203,
+0x40800204,
+0x3ec40085,
+0x10009c09,
+0x3ac10606,
+0xb060c105,
+0x4020007f,
+0x4020007f,
+0x20801203,
+0x38810602,
+0xb0408586,
+0x28810602,
+0x32004180,
+0x34204702,
+0x21a00382,
+0x4020007f,
+0x327fdc80,
+0x409ffe02,
+0x30801203,
+0x40800204,
+0x3ec40087,
+0x40800405,
+0x00200000,
+0x40800606,
+0x3ac10608,
+0x3ac14609,
+0x3ac1860a,
+0xb060c107,
+0x20801203,
+0x41004003,
+0x38810602,
+0x4020007f,
+0xb0408188,
+0x4020007f,
+0x28810602,
+0x41201002,
+0x38814603,
+0x10009c09,
+0xb060c109,
+0x4020007f,
+0x28814603,
+0x41193f83,
+0x38818602,
+0x60ffc003,
+0xb040818a,
+0x28818602,
+0x32003080,
+0x409ffe02,
+0x30801203,
+0x40800204,
+0x3ec40087,
+0x41201008,
+0x10009c14,
+0x40800405,
+0x3ac10609,
+0x40800606,
+0x3ac1460a,
+0xb060c107,
+0x3ac1860b,
+0x20801203,
+0x38810602,
+0xb0408409,
+0x28810602,
+0x38814603,
+0xb060c40a,
+0x4020007f,
+0x28814603,
+0x41193f83,
+0x38818602,
+0x60ffc003,
+0xb040818b,
+0x28818602,
+0x32002380,
+0x409ffe02,
+0x30801204,
+0x40800205,
+0x3ec40083,
+0x40800406,
+0x3ac14607,
+0x3ac18608,
+0xb0810103,
+0x41004002,
+0x20801204,
+0x4020007f,
+0x38814603,
+0x10009c0b,
+0xb060c107,
+0x4020007f,
+0x4020007f,
+0x28814603,
+0x38818602,
+0x4020007f,
+0x4020007f,
+0xb0408588,
+0x28818602,
+0x4020007f,
+0x32001780,
+0x409ffe02,
+0x1000640e,
+0x40800204,
+0x30801203,
+0x40800405,
+0x3ec40087,
+0x40800606,
+0x3ac10608,
+0x3ac14609,
+0x3ac1860a,
+0xb060c107,
+0x20801203,
+0x413d8003,
+0x38810602,
+0x4020007f,
+0x327fd780,
+0x409ffe02,
+0x10007f0c,
+0x40800205,
+0x30801204,
+0x40800406,
+0x3ec40083,
+0x3ac14607,
+0x3ac18608,
+0xb0810103,
+0x413d8002,
+0x20801204,
+0x38814603,
+0x4020007f,
+0x327feb80,
+0x409ffe02,
+0x30801203,
+0x40800204,
+0x3ec40087,
+0x40800405,
+0x1000650a,
+0x40800606,
+0x3ac10608,
+0x3ac14609,
+0x3ac1860a,
+0xb060c107,
+0x20801203,
+0x38810602,
+0xb0408588,
+0x4020007f,
+0x327fc980,
+0x00400000,
+0x40800003,
+0x4020007f,
+0x35000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
};
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
index 39e54003f1df..b9f81ac8a632 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
+++ b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
@@ -3,189 +3,741 @@
* Hex-dump auto generated from spu_save.c.
* Do not edit!
*/
-static unsigned int spu_save_code[] __page_aligned = {
-0x20805000, 0x20805201, 0x20805402, 0x20805603,
-0x20805804, 0x20805a05, 0x20805c06, 0x20805e07,
-0x20806008, 0x20806209, 0x2080640a, 0x2080660b,
-0x2080680c, 0x20806a0d, 0x20806c0e, 0x20806e0f,
-0x4201c003, 0x33800184, 0x1c010204, 0x40200000,
-0x24000190, 0x24004191, 0x24008192, 0x2400c193,
-0x141fc205, 0x23fffd84, 0x1c100183, 0x217ffb85,
-0x40800000, 0x409ff801, 0x24000080, 0x24fd8081,
-0x1cd80081, 0x33000180, 0x00000000, 0x00000000,
-0x01a00182, 0x3ec00083, 0xb1c38103, 0x01a00204,
-0x3ec10082, 0x4201400d, 0xb1c38202, 0x01a00583,
-0x34218682, 0x3ed80684, 0xb0408184, 0x24218682,
-0x01a00603, 0x00200000, 0x34214682, 0x3ed40684,
-0xb0408184, 0x40800003, 0x24214682, 0x21a00083,
-0x40800082, 0x21a00b02, 0x4020007f, 0x1000251e,
-0x40a80002, 0x32800008, 0x4205c00c, 0x00200000,
-0x40a0000b, 0x3f82070f, 0x4080020a, 0x40800709,
-0x3fe3078f, 0x3fbf0783, 0x3f200183, 0x3fbe0183,
-0x3fe30187, 0x18008387, 0x4205c002, 0x3ac30404,
-0x1cffc489, 0x00200000, 0x18008403, 0x38830402,
-0x4cffc486, 0x3ac28185, 0xb0408584, 0x28830402,
-0x1c020408, 0x38828182, 0xb0408385, 0x1802c387,
-0x28828182, 0x217ff886, 0x04000582, 0x32800007,
-0x21a00802, 0x3fbf0705, 0x3f200285, 0x3fbe0285,
-0x3fe30285, 0x21a00885, 0x04000603, 0x21a00903,
-0x40803c02, 0x21a00982, 0x04000386, 0x21a00a06,
-0x40801202, 0x21a00a82, 0x73000003, 0x24200683,
-0x01a00404, 0x00200000, 0x34204682, 0x3ec40683,
-0xb0408203, 0x24204682, 0x01a00783, 0x00200000,
-0x3421c682, 0x3edc0684, 0xb0408184, 0x2421c682,
-0x21a00806, 0x21a00885, 0x3fbf0784, 0x3f200204,
-0x3fbe0204, 0x3fe30204, 0x21a00904, 0x40804002,
-0x21a00982, 0x21a00a06, 0x40805a02, 0x21a00a82,
-0x04000683, 0x21a00803, 0x21a00885, 0x21a00904,
-0x40848002, 0x21a00982, 0x21a00a06, 0x40801002,
-0x21a00a82, 0x21a00a06, 0x40806602, 0x00200000,
-0x35800009, 0x21a00a82, 0x40800083, 0x21a00b83,
-0x01a00c02, 0x01a00d83, 0x00003ffb, 0x40800003,
-0x4020007f, 0x35000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
-0x00000000, 0x00000000, 0x00000000, 0x00000000,
+static unsigned int spu_save_code[] __attribute__((__aligned__(128))) = {
+0x20805000,
+0x20805201,
+0x20805402,
+0x20805603,
+0x20805804,
+0x20805a05,
+0x20805c06,
+0x20805e07,
+0x20806008,
+0x20806209,
+0x2080640a,
+0x2080660b,
+0x2080680c,
+0x20806a0d,
+0x20806c0e,
+0x20806e0f,
+0x4201c003,
+0x33800184,
+0x1c010204,
+0x40200000,
+0x24000190,
+0x24004191,
+0x24008192,
+0x2400c193,
+0x141fc205,
+0x23fffd84,
+0x1c100183,
+0x217ffb85,
+0x40800000,
+0x409ff801,
+0x24000080,
+0x24fd8081,
+0x1cd80081,
+0x33000180,
+0x00000000,
+0x00000000,
+0x01a00182,
+0x3ec00083,
+0xb1c38103,
+0x01a00204,
+0x3ec10082,
+0x4201400d,
+0xb1c38202,
+0x01a00583,
+0x34218682,
+0x3ed80684,
+0xb0408184,
+0x24218682,
+0x01a00603,
+0x00200000,
+0x34214682,
+0x3ed40684,
+0xb0408184,
+0x40800003,
+0x24214682,
+0x21a00083,
+0x40800082,
+0x21a00b02,
+0x4020007f,
+0x1000251e,
+0x42a00002,
+0x32800008,
+0x4205c00c,
+0x00200000,
+0x40a0000b,
+0x3f82070f,
+0x4080020a,
+0x40800709,
+0x3fe3078f,
+0x3fbf0783,
+0x3f200183,
+0x3fbe0183,
+0x3fe30187,
+0x18008387,
+0x4205c002,
+0x3ac30404,
+0x1cffc489,
+0x00200000,
+0x18008403,
+0x38830402,
+0x4cffc486,
+0x3ac28185,
+0xb0408584,
+0x28830402,
+0x1c020408,
+0x38828182,
+0xb0408385,
+0x1802c387,
+0x28828182,
+0x217ff886,
+0x04000582,
+0x32800007,
+0x21a00802,
+0x3fbf0705,
+0x3f200285,
+0x3fbe0285,
+0x3fe30285,
+0x21a00885,
+0x04000603,
+0x21a00903,
+0x40803c02,
+0x21a00982,
+0x04000386,
+0x21a00a06,
+0x40801202,
+0x21a00a82,
+0x73000003,
+0x24200683,
+0x01a00404,
+0x00200000,
+0x34204682,
+0x3ec40683,
+0xb0408203,
+0x24204682,
+0x01a00783,
+0x00200000,
+0x3421c682,
+0x3edc0684,
+0xb0408184,
+0x2421c682,
+0x21a00806,
+0x21a00885,
+0x3fbf0784,
+0x3f200204,
+0x3fbe0204,
+0x3fe30204,
+0x21a00904,
+0x40804002,
+0x21a00982,
+0x21a00a06,
+0x40805a02,
+0x21a00a82,
+0x04000683,
+0x21a00803,
+0x21a00885,
+0x21a00904,
+0x40848002,
+0x21a00982,
+0x21a00a06,
+0x40801002,
+0x21a00a82,
+0x21a00a06,
+0x40806602,
+0x00200000,
+0x35800009,
+0x21a00a82,
+0x40800083,
+0x21a00b83,
+0x01a00c02,
+0x01a00d83,
+0x00003ffb,
+0x40800003,
+0x4020007f,
+0x35000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
};
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 1726bfe38ee0..b30e55dab832 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -46,6 +46,7 @@
#include <asm/io.h>
#include <asm/spu.h>
+#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
@@ -622,12 +623,17 @@ static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Save, Step 42:
- * Save the following CH: [0,1,3,4,24,25,27]
*/
+
+ /* Save CH 1, without channel count */
+ out_be64(&priv2->spu_chnlcntptr_RW, 1);
+ csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
+
+ /* Save the following CH: [0,3,4,24,25,27] */
for (i = 0; i < 7; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
@@ -718,13 +724,15 @@ static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
- slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
- slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
+ u64 llp;
- /* Large pages are used for kernel text/data, but not vmalloc. */
- if (cpu_has_feature(CPU_FTR_16M_PAGE)
- && REGION_ID(ea) == KERNEL_REGION_ID)
- slb[0] |= SLB_VSID_L;
+ if (REGION_ID(ea) == KERNEL_REGION_ID)
+ llp = mmu_psize_defs[mmu_linear_psize].sllp;
+ else
+ llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+ SLB_VSID_KERNEL | llp;
+ slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
}
static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
@@ -1103,13 +1111,18 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ u64 ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
u64 idx;
int i;
/* Restore, Step 20:
- * Reset the following CH: [0,1,3,4,24,25,27]
*/
+
+ /* Reset CH 1 */
+ out_be64(&priv2->spu_chnlcntptr_RW, 1);
+ out_be64(&priv2->spu_chnldata_RW, 0UL);
+
+ /* Reset the following CH: [0,3,4,24,25,27] */
for (i = 0; i < 7; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
@@ -1570,12 +1583,17 @@ static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Restore, Step 59:
- * Restore the following CH: [0,1,3,4,24,25,27]
*/
+
+ /* Restore CH 1 without count */
+ out_be64(&priv2->spu_chnlcntptr_RW, 1);
+ out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[1]);
+
+ /* Restore the following CH: [0,3,4,24,25,27] */
for (i = 0; i < 7; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
@@ -2074,6 +2092,7 @@ int spu_save(struct spu_state *prev, struct spu *spu)
}
return rc;
}
+EXPORT_SYMBOL_GPL(spu_save);
/**
* spu_restore - SPU context restore, with harvest and locking.
@@ -2090,7 +2109,6 @@ int spu_restore(struct spu_state *new, struct spu *spu)
acquire_spu_lock(spu);
harvest(NULL, spu);
- spu->stop_code = 0;
spu->dar = 0;
spu->dsisr = 0;
spu->slb_replace = 0;
@@ -2103,6 +2121,7 @@ int spu_restore(struct spu_state *new, struct spu *spu)
}
return rc;
}
+EXPORT_SYMBOL_GPL(spu_restore);
/**
* spu_harvest - SPU harvest (reset) operation
@@ -2125,6 +2144,7 @@ static void init_prob(struct spu_state *csa)
csa->spu_chnlcnt_RW[28] = 1;
csa->spu_chnlcnt_RW[30] = 1;
csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
+ csa->prob.mb_stat_R = 0x000400;
}
static void init_priv1(struct spu_state *csa)
@@ -2193,6 +2213,7 @@ void spu_init_csa(struct spu_state *csa)
init_priv1(csa);
init_priv2(csa);
}
+EXPORT_SYMBOL_GPL(spu_init_csa);
void spu_fini_csa(struct spu_state *csa)
{
@@ -2203,3 +2224,4 @@ void spu_fini_csa(struct spu_state *csa)
vfree(csa->lscsa);
}
+EXPORT_SYMBOL_GPL(spu_fini_csa);
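
The switch.c hunks above pull channel 1 out of the generic save/restore loop: only its data word is captured and written back, while the loop that handles the remaining channels also saves and resets the channel count, which this change deliberately avoids for CH 1. A minimal sketch of the access pattern, assuming the same priv2 accessors and spu_state layout as switch.c (not a drop-in replacement for the kernel code):

static void sketch_save_ch1(struct spu_state *csa, struct spu_priv2 __iomem *priv2)
{
	out_be64(&priv2->spu_chnlcntptr_RW, 1);	/* select channel 1 */
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
	/* the channel count is intentionally left untouched */
}

static void sketch_restore_ch1(struct spu_state *csa, struct spu_priv2 __iomem *priv2)
{
	out_be64(&priv2->spu_chnlcntptr_RW, 1);	/* select channel 1 */
	out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[1]);
}
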
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
index ce8c0b943fa0..dee4eb4d8bec 100644
--- a/arch/powerpc/platforms/iseries/Makefile
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -1,9 +1,11 @@
EXTRA_CFLAGS += -mno-minimal-toc
-obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
+obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt_mod.o mf.o lpevents.o \
hvcall.o proc.o htab.o iommu.o misc.o irq.o
obj-$(CONFIG_PCI) += pci.o vpdinfo.o
-obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_VIOPATH) += viopath.o
obj-$(CONFIG_MODULES) += ksyms.o
+
+$(obj)/dt_mod.o: $(obj)/dt.o
+ @$(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings $(obj)/dt.o $(obj)/dt_mod.o
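
The extra Makefile rule rewrites dt.o so that gcc's merged string-literal section (.rodata.str1.8) becomes .dt_strings; the linker script then brackets that section with __dt_strings_start/__dt_strings_end, and the new dt.c (added below) copies it wholesale as the strings blob of the flattened device tree. A hedged sketch of why the rename matters, reusing the linker-provided symbol that dt.c declares:

extern char __dt_strings_start[];	/* provided by the linker script */

/* A property name that lives in the renamed section can be emitted as
 * a plain offset from the start of the strings blob, which is what
 * dt_prop() below does.
 */
static unsigned int dt_name_offset(const char *name)
{
	return (unsigned int)(name - __dt_strings_start);
}
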
diff --git a/arch/powerpc/platforms/iseries/call_pci.h b/arch/powerpc/platforms/iseries/call_pci.h
index 59d4e0ad5cf3..dbdf69850ed9 100644
--- a/arch/powerpc/platforms/iseries/call_pci.h
+++ b/arch/powerpc/platforms/iseries/call_pci.h
@@ -145,6 +145,25 @@ static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
return retVal.rc;
}
+static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u32 offset, u32 *value)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumber;
+ dsa.subBusNumber = subBusNumber;
+ dsa.deviceId = deviceId;
+
+ HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
+
+ *value = retVal.value;
+
+ return retVal.rc;
+}
+
static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
u8 deviceId, u32 offset, u8 value)
{
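
HvCallPci_configLoad32() rounds out the existing 8- and 16-bit config-space loads; the device-tree builder below uses it to fetch the 32-bit class/revision word in a single hypervisor call. A hedged usage sketch (error handling reduced to a bail-out; the bus, sub-bus and agent id are placeholders):

static int sketch_read_class(u16 bus, u8 sub_bus, u8 agent_id, u32 *class_rev)
{
	u64 rc = HvCallPci_configLoad32(bus, sub_bus, agent_id,
					PCI_CLASS_REVISION, class_rev);
	if (rc)
		return -EIO;
	/* *class_rev >> 8 is the class code, *class_rev & 0xff the revision */
	return 0;
}
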
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
new file mode 100644
index 000000000000..d3444aabe76e
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 2005-2006 Michael Ellerman, IBM Corporation
+ *
+ * Description:
+ * This file contains all the routines to build a flattened device
+ * tree for a legacy iSeries machine.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/threads.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h> /* ETH_ALEN */
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/lppaca.h>
+#include <asm/cputable.h>
+#include <asm/abs_addr.h>
+#include <asm/system.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_call_xm.h>
+#include <asm/iseries/it_exp_vpd_panel.h>
+#include <asm/udbg.h>
+
+#include "processor_vpd.h"
+#include "call_hpt.h"
+#include "call_pci.h"
+#include "pci.h"
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+/*
+ * These are created by the linker script at the start and end
+ * of the section containing all the strings from this file.
+ */
+extern char __dt_strings_start[];
+extern char __dt_strings_end[];
+
+struct iseries_flat_dt {
+ struct boot_param_header header;
+ u64 reserve_map[2];
+};
+
+static void * __initdata dt_data;
+
+/*
+ * Putting these strings here keeps them out of the section
+ * that we rename to .dt_strings using objcopy and capture
+ * for the strings blob of the flattened device tree.
+ */
+static char __initdata device_type_cpu[] = "cpu";
+static char __initdata device_type_memory[] = "memory";
+static char __initdata device_type_serial[] = "serial";
+static char __initdata device_type_network[] = "network";
+static char __initdata device_type_block[] = "block";
+static char __initdata device_type_byte[] = "byte";
+static char __initdata device_type_pci[] = "pci";
+static char __initdata device_type_vdevice[] = "vdevice";
+static char __initdata device_type_vscsi[] = "vscsi";
+
+static struct iseries_flat_dt * __init dt_init(void)
+{
+ struct iseries_flat_dt *dt;
+ unsigned long str_len;
+
+ str_len = __dt_strings_end - __dt_strings_start;
+ dt = (struct iseries_flat_dt *)ALIGN(klimit, 8);
+ dt->header.off_mem_rsvmap =
+ offsetof(struct iseries_flat_dt, reserve_map);
+ dt->header.off_dt_strings = ALIGN(sizeof(*dt), 8);
+ dt->header.off_dt_struct = dt->header.off_dt_strings
+ + ALIGN(str_len, 8);
+ dt_data = (void *)((unsigned long)dt + dt->header.off_dt_struct);
+ dt->header.dt_strings_size = str_len;
+
+ /* There is no notion of hardware cpu id on iSeries */
+ dt->header.boot_cpuid_phys = smp_processor_id();
+
+ memcpy((char *)dt + dt->header.off_dt_strings, __dt_strings_start,
+ str_len);
+
+ dt->header.magic = OF_DT_HEADER;
+ dt->header.version = 0x10;
+ dt->header.last_comp_version = 0x10;
+
+ dt->reserve_map[0] = 0;
+ dt->reserve_map[1] = 0;
+
+ return dt;
+}
+
+static void __init dt_push_u32(struct iseries_flat_dt *dt, u32 value)
+{
+ *((u32 *)dt_data) = value;
+ dt_data += sizeof(u32);
+}
+
+#ifdef notyet
+static void __init dt_push_u64(struct iseries_flat_dt *dt, u64 value)
+{
+ *((u64 *)dt_data) = value;
+ dt_data += sizeof(u64);
+}
+#endif
+
+static void __init dt_push_bytes(struct iseries_flat_dt *dt, const char *data,
+ int len)
+{
+ memcpy(dt_data, data, len);
+ dt_data += ALIGN(len, 4);
+}
+
+static void __init dt_start_node(struct iseries_flat_dt *dt, const char *name)
+{
+ dt_push_u32(dt, OF_DT_BEGIN_NODE);
+ dt_push_bytes(dt, name, strlen(name) + 1);
+}
+
+#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
+
+static void __init dt_prop(struct iseries_flat_dt *dt, const char *name,
+ const void *data, int len)
+{
+ unsigned long offset;
+
+ dt_push_u32(dt, OF_DT_PROP);
+
+ /* Length of the data */
+ dt_push_u32(dt, len);
+
+ offset = name - __dt_strings_start;
+
+ /* The offset of the property's name in the string blob. */
+ dt_push_u32(dt, (u32)offset);
+
+ /* The actual data. */
+ dt_push_bytes(dt, data, len);
+}
+
+static void __init dt_prop_str(struct iseries_flat_dt *dt, const char *name,
+ const char *data)
+{
+ dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for NULL */
+}
+
+static void __init dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
+ u32 data)
+{
+ dt_prop(dt, name, &data, sizeof(u32));
+}
+
+#ifdef notyet
+static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
+ u64 data)
+{
+ dt_prop(dt, name, &data, sizeof(u64));
+}
+#endif
+
+static void __init dt_prop_u64_list(struct iseries_flat_dt *dt,
+ const char *name, u64 *data, int n)
+{
+ dt_prop(dt, name, data, sizeof(u64) * n);
+}
+
+static void __init dt_prop_u32_list(struct iseries_flat_dt *dt,
+ const char *name, u32 *data, int n)
+{
+ dt_prop(dt, name, data, sizeof(u32) * n);
+}
+
+#ifdef notyet
+static void __init dt_prop_empty(struct iseries_flat_dt *dt, const char *name)
+{
+ dt_prop(dt, name, NULL, 0);
+}
+#endif
+
+static void __init dt_cpus(struct iseries_flat_dt *dt)
+{
+ unsigned char buf[32];
+ unsigned char *p;
+ unsigned int i, index;
+ struct IoHriProcessorVpd *d;
+ u32 pft_size[2];
+
+ /* yuck */
+ snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
+ p = strchr(buf, ' ');
+ if (!p) p = buf + strlen(buf);
+
+ dt_start_node(dt, "cpus");
+ dt_prop_u32(dt, "#address-cells", 1);
+ dt_prop_u32(dt, "#size-cells", 0);
+
+ pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
+ pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (lppaca[i].dyn_proc_status >= 2)
+ continue;
+
+ snprintf(p, 32 - (p - buf), "@%d", i);
+ dt_start_node(dt, buf);
+
+ dt_prop_str(dt, "device_type", device_type_cpu);
+
+ index = lppaca[i].dyn_hv_phys_proc_index;
+ d = &xIoHriProcessorVpd[index];
+
+ dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
+ dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
+
+ dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
+ dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
+
+ /* magic conversions to Hz copied from old code */
+ dt_prop_u32(dt, "clock-frequency",
+ ((1UL << 34) * 1000000) / d->xProcFreq);
+ dt_prop_u32(dt, "timebase-frequency",
+ ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
+
+ dt_prop_u32(dt, "reg", i);
+
+ dt_prop_u32_list(dt, "ibm,pft-size", pft_size, 2);
+
+ dt_end_node(dt);
+ }
+
+ dt_end_node(dt);
+}
+
+static void __init dt_model(struct iseries_flat_dt *dt)
+{
+ char buf[16] = "IBM,";
+
+ /* "IBM," + mfgId[2:3] + systemSerial[1:5] */
+ strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
+ strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
+ buf[11] = '\0';
+ dt_prop_str(dt, "system-id", buf);
+
+ /* "IBM," + machineType[0:4] */
+ strne2a(buf + 4, xItExtVpdPanel.machineType, 4);
+ buf[8] = '\0';
+ dt_prop_str(dt, "model", buf);
+
+ dt_prop_str(dt, "compatible", "IBM,iSeries");
+}
+
+static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
+ const char *name, u32 reg, int unit,
+ const char *type, const char *compat, int end)
+{
+ char buf[32];
+
+ snprintf(buf, 32, "%s@%08x", name, reg + ((unit >= 0) ? unit : 0));
+ dt_start_node(dt, buf);
+ dt_prop_str(dt, "device_type", type);
+ if (compat)
+ dt_prop_str(dt, "compatible", compat);
+ dt_prop_u32(dt, "reg", reg + ((unit >= 0) ? unit : 0));
+ if (unit >= 0)
+ dt_prop_u32(dt, "linux,unit_address", unit);
+ if (end)
+ dt_end_node(dt);
+}
+
+static void __init dt_vdevices(struct iseries_flat_dt *dt)
+{
+ u32 reg = 0;
+ HvLpIndexMap vlan_map;
+ int i;
+
+ dt_start_node(dt, "vdevice");
+ dt_prop_str(dt, "device_type", device_type_vdevice);
+ dt_prop_str(dt, "compatible", "IBM,iSeries-vdevice");
+ dt_prop_u32(dt, "#address-cells", 1);
+ dt_prop_u32(dt, "#size-cells", 0);
+
+ dt_do_vdevice(dt, "vty", reg, -1, device_type_serial, NULL, 1);
+ reg++;
+
+ dt_do_vdevice(dt, "v-scsi", reg, -1, device_type_vscsi,
+ "IBM,v-scsi", 1);
+ reg++;
+
+ vlan_map = HvLpConfig_getVirtualLanIndexMap();
+ for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
+ unsigned char mac_addr[ETH_ALEN];
+
+ if ((vlan_map & (0x8000 >> i)) == 0)
+ continue;
+ dt_do_vdevice(dt, "l-lan", reg, i, device_type_network,
+ "IBM,iSeries-l-lan", 0);
+ mac_addr[0] = 0x02;
+ mac_addr[1] = 0x01;
+ mac_addr[2] = 0xff;
+ mac_addr[3] = i;
+ mac_addr[4] = 0xff;
+ mac_addr[5] = HvLpConfig_getLpIndex_outline();
+ dt_prop(dt, "local-mac-address", (char *)mac_addr, ETH_ALEN);
+ dt_prop(dt, "mac-address", (char *)mac_addr, ETH_ALEN);
+ dt_prop_u32(dt, "max-frame-size", 9000);
+ dt_prop_u32(dt, "address-bits", 48);
+
+ dt_end_node(dt);
+ }
+ reg += HVMAXARCHITECTEDVIRTUALLANS;
+
+ for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
+ dt_do_vdevice(dt, "viodasd", reg, i, device_type_block,
+ "IBM,iSeries-viodasd", 1);
+ reg += HVMAXARCHITECTEDVIRTUALDISKS;
+
+ for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
+ dt_do_vdevice(dt, "viocd", reg, i, device_type_block,
+ "IBM,iSeries-viocd", 1);
+ reg += HVMAXARCHITECTEDVIRTUALCDROMS;
+
+ for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
+ dt_do_vdevice(dt, "viotape", reg, i, device_type_byte,
+ "IBM,iSeries-viotape", 1);
+
+ dt_end_node(dt);
+}
+
+struct pci_class_name {
+ u16 code;
+ const char *name;
+ const char *type;
+};
+
+static struct pci_class_name __initdata pci_class_name[] = {
+ { PCI_CLASS_NETWORK_ETHERNET, "ethernet", device_type_network },
+};
+
+static struct pci_class_name * __init dt_find_pci_class_name(u16 class_code)
+{
+ struct pci_class_name *cp;
+
+ for (cp = pci_class_name;
+ cp < &pci_class_name[ARRAY_SIZE(pci_class_name)]; cp++)
+ if (cp->code == class_code)
+ return cp;
+ return NULL;
+}
+
+/*
+ * This assumes that the node slot is always on the primary bus!
+ */
+static void __init scan_bridge_slot(struct iseries_flat_dt *dt,
+ HvBusNumber bus, struct HvCallPci_BridgeInfo *bridge_info)
+{
+ HvSubBusNumber sub_bus = bridge_info->subBusNumber;
+ u16 vendor_id;
+ u16 device_id;
+ u32 class_id;
+ int err;
+ char buf[32];
+ u32 reg[5];
+ int id_sel = ISERIES_GET_DEVICE_FROM_SUBBUS(sub_bus);
+ int function = ISERIES_GET_FUNCTION_FROM_SUBBUS(sub_bus);
+ HvAgentId eads_id_sel = ISERIES_PCI_AGENTID(id_sel, function);
+ u8 devfn;
+ struct pci_class_name *cp;
+
+ /*
+ * Connect all functions of any device found.
+ */
+ for (id_sel = 1; id_sel <= bridge_info->maxAgents; id_sel++) {
+ for (function = 0; function < 8; function++) {
+ HvAgentId agent_id = ISERIES_PCI_AGENTID(id_sel,
+ function);
+ err = HvCallXm_connectBusUnit(bus, sub_bus,
+ agent_id, 0);
+ if (err) {
+ if (err != 0x302)
+ DBG("connectBusUnit(%x, %x, %x) %x\n",
+ bus, sub_bus, agent_id, err);
+ continue;
+ }
+
+ err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
+ PCI_VENDOR_ID, &vendor_id);
+ if (err) {
+ DBG("ReadVendor(%x, %x, %x) %x\n",
+ bus, sub_bus, agent_id, err);
+ continue;
+ }
+ err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
+ PCI_DEVICE_ID, &device_id);
+ if (err) {
+ DBG("ReadDevice(%x, %x, %x) %x\n",
+ bus, sub_bus, agent_id, err);
+ continue;
+ }
+ err = HvCallPci_configLoad32(bus, sub_bus, agent_id,
+ PCI_CLASS_REVISION , &class_id);
+ if (err) {
+ DBG("ReadClass(%x, %x, %x) %x\n",
+ bus, sub_bus, agent_id, err);
+ continue;
+ }
+
+ devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(eads_id_sel),
+ function);
+ cp = dt_find_pci_class_name(class_id >> 16);
+ if (cp && cp->name)
+ strncpy(buf, cp->name, sizeof(buf) - 1);
+ else
+ snprintf(buf, sizeof(buf), "pci%x,%x",
+ vendor_id, device_id);
+ buf[sizeof(buf) - 1] = '\0';
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+ "@%x", PCI_SLOT(devfn));
+ buf[sizeof(buf) - 1] = '\0';
+ if (function != 0)
+ snprintf(buf + strlen(buf),
+ sizeof(buf) - strlen(buf),
+ ",%x", function);
+ dt_start_node(dt, buf);
+ reg[0] = (bus << 16) | (devfn << 8);
+ reg[1] = 0;
+ reg[2] = 0;
+ reg[3] = 0;
+ reg[4] = 0;
+ dt_prop_u32_list(dt, "reg", reg, 5);
+ if (cp && (cp->type || cp->name))
+ dt_prop_str(dt, "device_type",
+ cp->type ? cp->type : cp->name);
+ dt_prop_u32(dt, "vendor-id", vendor_id);
+ dt_prop_u32(dt, "device-id", device_id);
+ dt_prop_u32(dt, "class-code", class_id >> 8);
+ dt_prop_u32(dt, "revision-id", class_id & 0xff);
+ dt_prop_u32(dt, "linux,subbus", sub_bus);
+ dt_prop_u32(dt, "linux,agent-id", agent_id);
+ dt_prop_u32(dt, "linux,logical-slot-number",
+ bridge_info->logicalSlotNumber);
+ dt_end_node(dt);
+
+ }
+ }
+}
+
+static void __init scan_bridge(struct iseries_flat_dt *dt, HvBusNumber bus,
+ HvSubBusNumber sub_bus, int id_sel)
+{
+ struct HvCallPci_BridgeInfo bridge_info;
+ HvAgentId agent_id;
+ int function;
+ int ret;
+
+ /* Note: hvSubBus and irq are always 0 at this level! */
+ for (function = 0; function < 8; ++function) {
+ agent_id = ISERIES_PCI_AGENTID(id_sel, function);
+ ret = HvCallXm_connectBusUnit(bus, sub_bus, agent_id, 0);
+ if (ret != 0) {
+ if (ret != 0xb)
+ DBG("connectBusUnit(%x, %x, %x) %x\n",
+ bus, sub_bus, agent_id, ret);
+ continue;
+ }
+ DBG("found device at bus %d idsel %d func %d (AgentId %x)\n",
+ bus, id_sel, function, agent_id);
+ ret = HvCallPci_getBusUnitInfo(bus, sub_bus, agent_id,
+ iseries_hv_addr(&bridge_info),
+ sizeof(struct HvCallPci_BridgeInfo));
+ if (ret != 0)
+ continue;
+ DBG("bridge info: type %x subbus %x "
+ "maxAgents %x maxsubbus %x logslot %x\n",
+ bridge_info.busUnitInfo.deviceType,
+ bridge_info.subBusNumber,
+ bridge_info.maxAgents,
+ bridge_info.maxSubBusNumber,
+ bridge_info.logicalSlotNumber);
+ if (bridge_info.busUnitInfo.deviceType ==
+ HvCallPci_BridgeDevice)
+ scan_bridge_slot(dt, bus, &bridge_info);
+ else
+ DBG("PCI: Invalid Bridge Configuration(0x%02X)",
+ bridge_info.busUnitInfo.deviceType);
+ }
+}
+
+static void __init scan_phb(struct iseries_flat_dt *dt, HvBusNumber bus)
+{
+ struct HvCallPci_DeviceInfo dev_info;
+ const HvSubBusNumber sub_bus = 0; /* EADs is always 0. */
+ int err;
+ int id_sel;
+ const int max_agents = 8;
+
+ /*
+ * Probe for EADs Bridges
+ */
+ for (id_sel = 1; id_sel < max_agents; ++id_sel) {
+ err = HvCallPci_getDeviceInfo(bus, sub_bus, id_sel,
+ iseries_hv_addr(&dev_info),
+ sizeof(struct HvCallPci_DeviceInfo));
+ if (err) {
+ if (err != 0x302)
+ DBG("getDeviceInfo(%x, %x, %x) %x\n",
+ bus, sub_bus, id_sel, err);
+ continue;
+ }
+ if (dev_info.deviceType != HvCallPci_NodeDevice) {
+ DBG("PCI: Invalid System Configuration"
+ "(0x%02X) for bus 0x%02x id 0x%02x.\n",
+ dev_info.deviceType, bus, id_sel);
+ continue;
+ }
+ scan_bridge(dt, bus, sub_bus, id_sel);
+ }
+}
+
+static void __init dt_pci_devices(struct iseries_flat_dt *dt)
+{
+ HvBusNumber bus;
+ char buf[32];
+ u32 buses[2];
+ int phb_num = 0;
+
+ /* Check all possible buses. */
+ for (bus = 0; bus < 256; bus++) {
+ int err = HvCallXm_testBus(bus);
+
+ if (err) {
+ /*
+ * Check for Unexpected Return code, a clue that
+ * something has gone wrong.
+ */
+ if (err != 0x0301)
+ DBG("Unexpected Return on Probe(0x%02X) "
+ "0x%04X\n", bus, err);
+ continue;
+ }
+ DBG("bus %d appears to exist\n", bus);
+ snprintf(buf, 32, "pci@%d", phb_num);
+ dt_start_node(dt, buf);
+ dt_prop_str(dt, "device_type", device_type_pci);
+ dt_prop_str(dt, "compatible", "IBM,iSeries-Logical-PHB");
+ dt_prop_u32(dt, "#address-cells", 3);
+ dt_prop_u32(dt, "#size-cells", 2);
+ buses[0] = buses[1] = bus;
+ dt_prop_u32_list(dt, "bus-range", buses, 2);
+ scan_phb(dt, bus);
+ dt_end_node(dt);
+ phb_num++;
+ }
+}
+
+static void dt_finish(struct iseries_flat_dt *dt)
+{
+ dt_push_u32(dt, OF_DT_END);
+ dt->header.totalsize = (unsigned long)dt_data - (unsigned long)dt;
+ klimit = ALIGN((unsigned long)dt_data, 8);
+}
+
+void * __init build_flat_dt(unsigned long phys_mem_size)
+{
+ struct iseries_flat_dt *iseries_dt;
+ u64 tmp[2];
+
+ iseries_dt = dt_init();
+
+ dt_start_node(iseries_dt, "");
+
+ dt_prop_u32(iseries_dt, "#address-cells", 2);
+ dt_prop_u32(iseries_dt, "#size-cells", 2);
+ dt_model(iseries_dt);
+
+ /* /memory */
+ dt_start_node(iseries_dt, "memory@0");
+ dt_prop_str(iseries_dt, "device_type", device_type_memory);
+ tmp[0] = 0;
+ tmp[1] = phys_mem_size;
+ dt_prop_u64_list(iseries_dt, "reg", tmp, 2);
+ dt_end_node(iseries_dt);
+
+ /* /chosen */
+ dt_start_node(iseries_dt, "chosen");
+ dt_prop_str(iseries_dt, "bootargs", cmd_line);
+ dt_end_node(iseries_dt);
+
+ dt_cpus(iseries_dt);
+
+ dt_vdevices(iseries_dt);
+ dt_pci_devices(iseries_dt);
+
+ dt_end_node(iseries_dt);
+
+ dt_finish(iseries_dt);
+
+ return iseries_dt;
+}
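
build_flat_dt() writes the flattened device tree straight into memory above klimit: dt_start_node()/dt_prop()/dt_end_node() emit the structure block as a token stream, with property names referenced by offset into the strings blob set up in dt_init(). A hedged sketch of how a consumer would walk that structure block (token layout per the flattened-tree format version 0x10 targeted above; NOP tokens are not handled because this builder never emits them):

static void sketch_walk_struct_block(u32 *p, const char *strings)
{
	while (*p != OF_DT_END) {
		switch (*p++) {
		case OF_DT_BEGIN_NODE:	/* followed by the NUL-terminated name */
			printk("node '%s'\n", (char *)p);
			p += ALIGN(strlen((char *)p) + 1, 4) / 4;
			break;
		case OF_DT_PROP: {	/* u32 len, u32 name offset, then data */
			u32 len = *p++;
			u32 nameoff = *p++;
			printk("  prop %s, %u bytes\n", strings + nameoff, len);
			p += ALIGN(len, 4) / 4;
			break;
		}
		case OF_DT_END_NODE:	/* closes the current node */
			break;
		}
	}
}
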
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index bea0b703f409..e3bd2015f2c9 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -4,6 +4,7 @@
* Rewrite, cleanup:
*
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
*
* Dynamic DMA mapping support, iSeries-specific parts.
*
@@ -31,42 +32,37 @@
#include <asm/tce.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
+#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/iseries/hv_call_xm.h>
-
-#include "iommu.h"
-
-extern struct list_head iSeries_Global_Device_List;
-
+#include <asm/iseries/iommu.h>
static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction)
{
u64 rc;
- union tce_entry tce;
+ u64 tce, rpn;
index <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
while (npages--) {
- tce.te_word = 0;
- tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
+ rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
+ tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
if (tbl->it_type == TCE_VB) {
/* Virtual Bus */
- tce.te_bits.tb_valid = 1;
- tce.te_bits.tb_allio = 1;
+ tce |= TCE_VALID|TCE_ALLIO;
if (direction != DMA_TO_DEVICE)
- tce.te_bits.tb_rdwr = 1;
+ tce |= TCE_VB_WRITE;
} else {
/* PCI Bus */
- tce.te_bits.tb_rdwr = 1; /* Read allowed */
+ tce |= TCE_PCI_READ; /* Read allowed */
if (direction != DMA_TO_DEVICE)
- tce.te_bits.tb_pciwr = 1;
+ tce |= TCE_PCI_WRITE;
}
- rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index,
- tce.te_word);
+ rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
if (rc)
panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
rc);
@@ -124,7 +120,7 @@ void iommu_table_getparms_iSeries(unsigned long busno,
/* itc_size is in pages worth of table, it_size is in # of entries */
tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
- sizeof(union tce_entry)) >> TCE_PAGE_FACTOR;
+ TCE_ENTRY_SIZE) >> TCE_PAGE_FACTOR;
tbl->it_busno = parms->itc_busno;
tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
tbl->it_index = parms->itc_index;
@@ -142,10 +138,15 @@ void iommu_table_getparms_iSeries(unsigned long busno,
*/
static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
{
- struct pci_dn *pdn;
+ struct device_node *node;
- list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
- struct iommu_table *it = pdn->iommu_table;
+ for (node = NULL; (node = of_find_all_nodes(node)); ) {
+ struct pci_dn *pdn = PCI_DN(node);
+ struct iommu_table *it;
+
+ if (pdn == NULL)
+ continue;
+ it = pdn->iommu_table;
if ((it != NULL) &&
(it->it_type == TCE_PCI) &&
(it->it_offset == tbl->it_offset) &&
@@ -161,15 +162,18 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
{
struct iommu_table *tbl;
struct pci_dn *pdn = PCI_DN(dn);
+ u32 *lsn = (u32 *)get_property(dn, "linux,logical-slot-number", NULL);
+
+ BUG_ON(lsn == NULL);
tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
- iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl);
+ iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
/* Look for existing tce table */
pdn->iommu_table = iommu_table_find(tbl);
if (pdn->iommu_table == NULL)
- pdn->iommu_table = iommu_init_table(tbl);
+ pdn->iommu_table = iommu_init_table(tbl, -1);
else
kfree(tbl);
}
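
The iommu.c conversion drops the old union tce_entry bitfields in favour of assembling each TCE as a plain u64 from the real page number and access bits, and the table-sharing lookup now walks the device tree instead of the removed global device list. A minimal sketch of the new-style entry construction for the PCI case, using the TCE_* constants referenced in the hunk:

static u64 sketch_build_pci_tce(unsigned long uaddr, enum dma_data_direction dir)
{
	u64 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
	u64 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

	tce |= TCE_PCI_READ;		/* reads are always allowed */
	if (dir != DMA_TO_DEVICE)
		tce |= TCE_PCI_WRITE;	/* device may also write */
	return tce;
}
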
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index be3fbfc24e6c..62bbbcf5ded3 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -42,6 +42,7 @@
#include <asm/iseries/it_lp_queue.h>
#include "irq.h"
+#include "pci.h"
#include "call_pci.h"
#if defined(CONFIG_SMP)
@@ -312,12 +313,12 @@ static hw_irq_controller iSeries_IRQ_handler = {
* Note that sub_bus is always 0 (at the moment at least).
*/
int __init iSeries_allocate_IRQ(HvBusNumber bus,
- HvSubBusNumber sub_bus, HvAgentId dev_id)
+ HvSubBusNumber sub_bus, u32 bsubbus)
{
int virtirq;
unsigned int realirq;
- u8 idsel = (dev_id >> 4);
- u8 function = dev_id & 7;
+ u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus);
+ u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus);
realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
+ function;
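
iSeries_allocate_IRQ() now takes the raw sub-bus value stored in the device's pci_dn and recovers idsel/function itself via the ISERIES_GET_*_FROM_SUBBUS helpers from pci.h, rather than being handed a pre-built agent id. A hedged sketch of the caller side (the bus-unit connect step and error handling are omitted here; see iSeries_pci_final_fixup() further down):

static void sketch_wire_up_irq(struct pci_dn *pdn, struct pci_dev *pdev)
{
	/* idsel and function are decoded from the sub-bus inside the call */
	int irq = iSeries_allocate_IRQ(pdn->busno, 0, pdn->bussubno);

	pdev->irq = irq;
}
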
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
index b9c801ba5a47..188aa808abd7 100644
--- a/arch/powerpc/platforms/iseries/irq.h
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -2,7 +2,7 @@
#define _ISERIES_IRQ_H
extern void iSeries_init_IRQ(void);
-extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
+extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
extern void iSeries_activate_IRQs(void);
extern int iSeries_get_irq(struct pt_regs *);
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index d771b8ee857d..1a2c2a50f922 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -45,7 +45,6 @@
#include "setup.h"
-extern int piranha_simulator;
static int mf_initialized;
/*
@@ -658,7 +657,7 @@ static void mf_clear_src(void)
void __init mf_display_progress(u16 value)
{
- if (piranha_simulator || !mf_initialized)
+ if (!mf_initialized)
return;
if (0xFFFF == value)
@@ -1295,9 +1294,6 @@ __initcall(mf_proc_init);
*/
void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
{
- if (piranha_simulator)
- return;
-
mf_get_rtc(rtc_tm);
rtc_tm->tm_mon--;
}
@@ -1316,9 +1312,6 @@ unsigned long iSeries_get_boot_time(void)
{
struct rtc_time tm;
- if (piranha_simulator)
- return 0;
-
mf_get_boot_rtc(&tm);
return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index a19833b880e4..35bcc98111f5 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -37,36 +37,18 @@
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>
+#include <asm/iseries/iommu.h>
#include <asm/ppc-pci.h>
#include "irq.h"
#include "pci.h"
#include "call_pci.h"
-#include "iommu.h"
-
-extern unsigned long io_page_mask;
/*
* Forward declares of prototypes.
*/
static struct device_node *find_Device_Node(int bus, int devfn);
-static void scan_PHB_slots(struct pci_controller *Phb);
-static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
-static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
-
-LIST_HEAD(iSeries_Global_Device_List);
-
-static int DeviceCount;
-
-/* Counters and control flags. */
-static long Pci_Io_Read_Count;
-static long Pci_Io_Write_Count;
-#if 0
-static long Pci_Cfg_Read_Count;
-static long Pci_Cfg_Write_Count;
-#endif
-static long Pci_Error_Count;
static int Pci_Retry_Max = 3; /* Only retry 3 times */
static int Pci_Error_Flag = 1; /* Set Retry Error on. */
@@ -81,41 +63,19 @@ static struct pci_ops iSeries_pci_ops;
#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
#define BASE_IO_MEMORY 0xE000000000000000UL
-static unsigned long max_io_memory = 0xE000000000000000UL;
+static unsigned long max_io_memory = BASE_IO_MEMORY;
static long current_iomm_table_entry;
/*
* Lookup Tables.
*/
-static struct device_node **iomm_table;
-static u8 *iobar_table;
+static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
+static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];
-/*
- * Static and Global variables
- */
-static char *pci_io_text = "iSeries PCI I/O";
+static const char pci_io_text[] = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);
/*
- * iomm_table_initialize
- *
- * Allocates and initalizes the Address Translation Table and Bar
- * Tables to get them ready for use. Must be called before any
- * I/O space is handed out to the device BARs.
- */
-static void iomm_table_initialize(void)
-{
- spin_lock(&iomm_table_lock);
- iomm_table = kmalloc(sizeof(*iomm_table) * IOMM_TABLE_MAX_ENTRIES,
- GFP_KERNEL);
- iobar_table = kmalloc(sizeof(*iobar_table) * IOMM_TABLE_MAX_ENTRIES,
- GFP_KERNEL);
- spin_unlock(&iomm_table_lock);
- if ((iomm_table == NULL) || (iobar_table == NULL))
- panic("PCI: I/O tables allocation failed.\n");
-}
-
-/*
* iomm_table_allocate_entry
*
* Adds pci_dev entry in address translation table
@@ -142,9 +102,8 @@ static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
*/
spin_lock(&iomm_table_lock);
bar_res->name = pci_io_text;
- bar_res->start =
+ bar_res->start = BASE_IO_MEMORY +
IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
- bar_res->start += BASE_IO_MEMORY;
bar_res->end = bar_res->start + bar_size - 1;
/*
* Allocate the number of table entries needed for BAR.
@@ -156,7 +115,7 @@ static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
++current_iomm_table_entry;
}
max_io_memory = BASE_IO_MEMORY +
- (IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry);
+ IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
spin_unlock(&iomm_table_lock);
}
@@ -173,13 +132,10 @@ static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
*/
static void allocate_device_bars(struct pci_dev *dev)
{
- struct resource *bar_res;
int bar_num;
- for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
- bar_res = &dev->resource[bar_num];
+ for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
iomm_table_allocate_entry(dev, bar_num);
- }
}
/*
@@ -199,34 +155,7 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
}
/*
- * build_device_node(u16 Bus, int SubBus, u8 DevFn)
- */
-static struct device_node *build_device_node(HvBusNumber Bus,
- HvSubBusNumber SubBus, int AgentId, int Function)
-{
- struct device_node *node;
- struct pci_dn *pdn;
-
- node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
- if (node == NULL)
- return NULL;
- memset(node, 0, sizeof(struct device_node));
- pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
- if (pdn == NULL) {
- kfree(node);
- return NULL;
- }
- node->data = pdn;
- pdn->node = node;
- list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List);
- pdn->busno = Bus;
- pdn->bussubno = SubBus;
- pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
- return node;
-}
-
-/*
- * unsigned long __init find_and_init_phbs(void)
+ * iSeries_pcibios_init
*
* Description:
* This function checks for all possible system PCI host bridges that connect
@@ -234,50 +163,42 @@ static struct device_node *build_device_node(HvBusNumber Bus,
* ownership status. A pci_controller is built for any bus which is partially
* owned or fully owned by this guest partition.
*/
-unsigned long __init find_and_init_phbs(void)
+void iSeries_pcibios_init(void)
{
struct pci_controller *phb;
- HvBusNumber bus;
-
- /* Check all possible buses. */
- for (bus = 0; bus < 256; bus++) {
- int ret = HvCallXm_testBus(bus);
- if (ret == 0) {
- printk("bus %d appears to exist\n", bus);
+ struct device_node *root = of_find_node_by_path("/");
+ struct device_node *node = NULL;
- phb = pcibios_alloc_controller(NULL);
- if (phb == NULL)
- return -ENOMEM;
-
- phb->pci_mem_offset = phb->local_number = bus;
- phb->first_busno = bus;
- phb->last_busno = bus;
- phb->ops = &iSeries_pci_ops;
-
- /* Find and connect the devices. */
- scan_PHB_slots(phb);
- }
- /*
- * Check for Unexpected Return code, a clue that something
- * has gone wrong.
- */
- else if (ret != 0x0301)
- printk(KERN_ERR "Unexpected Return on Probe(0x%04X): 0x%04X",
- bus, ret);
+ if (root == NULL) {
+ printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
+ "of device tree\n");
+ return;
+ }
+ while ((node = of_get_next_child(root, node)) != NULL) {
+ HvBusNumber bus;
+ u32 *busp;
+
+ if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
+ continue;
+
+ busp = (u32 *)get_property(node, "bus-range", NULL);
+ if (busp == NULL)
+ continue;
+ bus = *busp;
+ printk("bus %d appears to exist\n", bus);
+ phb = pcibios_alloc_controller(node);
+ if (phb == NULL)
+ continue;
+
+ phb->pci_mem_offset = phb->local_number = bus;
+ phb->first_busno = bus;
+ phb->last_busno = bus;
+ phb->ops = &iSeries_pci_ops;
}
- return 0;
-}
-/*
- * iSeries_pcibios_init
- *
- * Chance to initialize and structures or variable before PCI Bus walk.
- */
-void iSeries_pcibios_init(void)
-{
- iomm_table_initialize();
- find_and_init_phbs();
- io_page_mask = -1;
+ of_node_put(root);
+
+ pci_devs_phb_init();
}
/*
@@ -299,6 +220,34 @@ void __init iSeries_pci_final_fixup(void)
pdev->bus->number, pdev->devfn, node);
if (node != NULL) {
+ struct pci_dn *pdn = PCI_DN(node);
+ u32 *agent;
+
+ agent = (u32 *)get_property(node, "linux,agent-id",
+ NULL);
+ if ((pdn != NULL) && (agent != NULL)) {
+ u8 irq = iSeries_allocate_IRQ(pdn->busno, 0,
+ pdn->bussubno);
+ int err;
+
+ err = HvCallXm_connectBusUnit(pdn->busno, pdn->bussubno,
+ *agent, irq);
+ if (err)
+ pci_Log_Error("Connect Bus Unit",
+ pdn->busno, pdn->bussubno, *agent, err);
+ else {
+ err = HvCallPci_configStore8(pdn->busno, pdn->bussubno,
+ *agent,
+ PCI_INTERRUPT_LINE,
+ irq);
+ if (err)
+ pci_Log_Error("PciCfgStore Irq Failed!",
+ pdn->busno, pdn->bussubno, *agent, err);
+ }
+ if (!err)
+ pdev->irq = irq;
+ }
+
++DeviceCount;
pdev->sysdata = (void *)node;
PCI_DN(node)->pcidev = pdev;
@@ -308,7 +257,6 @@ void __init iSeries_pci_final_fixup(void)
} else
printk("PCI: Device Tree not found for 0x%016lX\n",
(unsigned long)pdev);
- pdev->irq = PCI_DN(node)->Irq;
}
iSeries_activate_IRQs();
mf_display_src(0xC9000200);
@@ -323,148 +271,6 @@ void pcibios_fixup_resources(struct pci_dev *pdev)
}
/*
- * Loop through each node function to find usable EADs bridges.
- */
-static void scan_PHB_slots(struct pci_controller *Phb)
-{
- struct HvCallPci_DeviceInfo *DevInfo;
- HvBusNumber bus = Phb->local_number; /* System Bus */
- const HvSubBusNumber SubBus = 0; /* EADs is always 0. */
- int HvRc = 0;
- int IdSel;
- const int MaxAgents = 8;
-
- DevInfo = (struct HvCallPci_DeviceInfo*)
- kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
- if (DevInfo == NULL)
- return;
-
- /*
- * Probe for EADs Bridges
- */
- for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
- HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
- iseries_hv_addr(DevInfo),
- sizeof(struct HvCallPci_DeviceInfo));
- if (HvRc == 0) {
- if (DevInfo->deviceType == HvCallPci_NodeDevice)
- scan_EADS_bridge(bus, SubBus, IdSel);
- else
- printk("PCI: Invalid System Configuration(0x%02X)"
- " for bus 0x%02x id 0x%02x.\n",
- DevInfo->deviceType, bus, IdSel);
- }
- else
- pci_Log_Error("getDeviceInfo", bus, SubBus, IdSel, HvRc);
- }
- kfree(DevInfo);
-}
-
-static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
- int IdSel)
-{
- struct HvCallPci_BridgeInfo *BridgeInfo;
- HvAgentId AgentId;
- int Function;
- int HvRc;
-
- BridgeInfo = (struct HvCallPci_BridgeInfo *)
- kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
- if (BridgeInfo == NULL)
- return;
-
- /* Note: hvSubBus and irq is always be 0 at this level! */
- for (Function = 0; Function < 8; ++Function) {
- AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
- HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
- if (HvRc == 0) {
- printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
- bus, IdSel, Function, AgentId);
- /* Connect EADs: 0x18.00.12 = 0x00 */
- HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
- iseries_hv_addr(BridgeInfo),
- sizeof(struct HvCallPci_BridgeInfo));
- if (HvRc == 0) {
- printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
- BridgeInfo->busUnitInfo.deviceType,
- BridgeInfo->subBusNumber,
- BridgeInfo->maxAgents,
- BridgeInfo->maxSubBusNumber,
- BridgeInfo->logicalSlotNumber);
- if (BridgeInfo->busUnitInfo.deviceType ==
- HvCallPci_BridgeDevice) {
- /* Scan_Bridge_Slot...: 0x18.00.12 */
- scan_bridge_slot(bus, BridgeInfo);
- } else
- printk("PCI: Invalid Bridge Configuration(0x%02X)",
- BridgeInfo->busUnitInfo.deviceType);
- }
- } else if (HvRc != 0x000B)
- pci_Log_Error("EADs Connect",
- bus, SubBus, AgentId, HvRc);
- }
- kfree(BridgeInfo);
-}
-
-/*
- * This assumes that the node slot is always on the primary bus!
- */
-static int scan_bridge_slot(HvBusNumber Bus,
- struct HvCallPci_BridgeInfo *BridgeInfo)
-{
- struct device_node *node;
- HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
- u16 VendorId = 0;
- int HvRc = 0;
- u8 Irq = 0;
- int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
- int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
- HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
-
- /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
- Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
-
- /*
- * Connect all functions of any device found.
- */
- for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
- for (Function = 0; Function < 8; ++Function) {
- HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
- HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
- AgentId, Irq);
- if (HvRc != 0) {
- pci_Log_Error("Connect Bus Unit",
- Bus, SubBus, AgentId, HvRc);
- continue;
- }
-
- HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId,
- PCI_VENDOR_ID, &VendorId);
- if (HvRc != 0) {
- pci_Log_Error("Read Vendor",
- Bus, SubBus, AgentId, HvRc);
- continue;
- }
- printk("read vendor ID: %x\n", VendorId);
-
- /* FoundDevice: 0x18.28.10 = 0x12AE */
- HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
- PCI_INTERRUPT_LINE, Irq);
- if (HvRc != 0)
- pci_Log_Error("PciCfgStore Irq Failed!",
- Bus, SubBus, AgentId, HvRc);
-
- ++DeviceCount;
- node = build_device_node(Bus, SubBus, EADsIdSel, Function);
- PCI_DN(node)->Irq = Irq;
- PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;
-
- } /* for (Function = 0; Function < 8; ++Function) */
- } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
- return HvRc;
-}
-
-/*
* I/0 Memory copy MUST use mmio commands on iSeries
* To do; For performance, include the hv call directly
*/
@@ -509,11 +315,13 @@ EXPORT_SYMBOL(iSeries_memcpy_fromio);
*/
static struct device_node *find_Device_Node(int bus, int devfn)
{
- struct pci_dn *pdn;
+ struct device_node *node;
+
+ for (node = NULL; (node = of_find_all_nodes(node)); ) {
+ struct pci_dn *pdn = PCI_DN(node);
- list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
- if ((bus == pdn->busno) && (devfn == pdn->devfn))
- return pdn->node;
+ if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
+ return node;
}
return NULL;
}
@@ -625,7 +433,6 @@ static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
if (ret != 0) {
struct pci_dn *pdn = PCI_DN(DevNode);
- ++Pci_Error_Count;
(*retry)++;
printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
TextHdr, pdn->busno, pdn->devfn,
@@ -707,7 +514,6 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
return 0xff;
}
do {
- ++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
@@ -737,7 +543,6 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
return 0xffff;
}
do {
- ++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
BarOffset, 0);
} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
@@ -768,7 +573,6 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
return 0xffffffff;
}
do {
- ++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
BarOffset, 0);
} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
@@ -806,7 +610,6 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
return;
}
do {
- ++Pci_Io_Write_Count;
rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
} while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
}
@@ -834,7 +637,6 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
return;
}
do {
- ++Pci_Io_Write_Count;
rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
}
@@ -862,7 +664,6 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
return;
}
do {
- ++Pci_Io_Write_Count;
rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
}
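
With the global iSeries_Global_Device_List gone, every lookup in pci.c walks the device tree with of_find_all_nodes() and matches on the fields kept in each node's pci_dn; find_Device_Node() above and the iommu table-sharing search use the same idiom. A condensed sketch of that pattern:

static struct device_node *sketch_find_node(int bus, int devfn)
{
	struct device_node *node = NULL;

	while ((node = of_find_all_nodes(node)) != NULL) {
		struct pci_dn *pdn = PCI_DN(node);

		/* nodes without PCI private data are skipped */
		if (pdn && pdn->busno == bus && pdn->devfn == devfn)
			return node;
	}
	return NULL;
}
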
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index a6fd9bedb074..617c724c4590 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -50,7 +50,6 @@
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/mf.h>
-#include <asm/iseries/it_exp_vpd_panel.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/lpar_map.h>
#include <asm/udbg.h>
@@ -81,9 +80,6 @@ extern void iSeries_pci_final_fixup(void);
static void iSeries_pci_final_fixup(void) { }
#endif
-/* Global Variables */
-int piranha_simulator;
-
extern int rd_size; /* Defined in drivers/block/rd.c */
extern unsigned long embedded_sysmap_start;
extern unsigned long embedded_sysmap_end;
@@ -91,8 +87,6 @@ extern unsigned long embedded_sysmap_end;
extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan;
-static unsigned long cmd_mem_limit;
-
struct MemoryBlock {
unsigned long absStart;
unsigned long absEnd;
@@ -340,8 +334,6 @@ static void __init iSeries_init_early(void)
#ifdef CONFIG_SMP
smp_init_iSeries();
#endif
- if (itLpNaca.xPirEnvironMode == 0)
- piranha_simulator = 1;
/* Associate Lp Event Queue 0 with processor 0 */
HvCallEvent_setLpEventQueueInterruptProc(0, 0);
@@ -536,10 +528,10 @@ static void __init iSeries_setup_arch(void)
{
if (get_lppaca()->shared_proc) {
ppc_md.idle_loop = iseries_shared_idle;
- printk(KERN_INFO "Using shared processor idle loop\n");
+ printk(KERN_DEBUG "Using shared processor idle loop\n");
} else {
ppc_md.idle_loop = iseries_dedicated_idle;
- printk(KERN_INFO "Using dedicated idle loop\n");
+ printk(KERN_DEBUG "Using dedicated idle loop\n");
}
/* Setup the Lp Event Queue */
@@ -714,243 +706,6 @@ define_machine(iseries) {
/* XXX Implement enable_pmcs for iSeries */
};
-struct blob {
- unsigned char data[PAGE_SIZE];
- unsigned long next;
-};
-
-struct iseries_flat_dt {
- struct boot_param_header header;
- u64 reserve_map[2];
- struct blob dt;
- struct blob strings;
-};
-
-struct iseries_flat_dt iseries_dt;
-
-void dt_init(struct iseries_flat_dt *dt)
-{
- dt->header.off_mem_rsvmap =
- offsetof(struct iseries_flat_dt, reserve_map);
- dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
- dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
- dt->header.totalsize = sizeof(struct iseries_flat_dt);
- dt->header.dt_strings_size = sizeof(struct blob);
-
- /* There is no notion of hardware cpu id on iSeries */
- dt->header.boot_cpuid_phys = smp_processor_id();
-
- dt->dt.next = (unsigned long)&dt->dt.data;
- dt->strings.next = (unsigned long)&dt->strings.data;
-
- dt->header.magic = OF_DT_HEADER;
- dt->header.version = 0x10;
- dt->header.last_comp_version = 0x10;
-
- dt->reserve_map[0] = 0;
- dt->reserve_map[1] = 0;
-}
-
-void dt_check_blob(struct blob *b)
-{
- if (b->next >= (unsigned long)&b->next) {
- DBG("Ran out of space in flat device tree blob!\n");
- BUG();
- }
-}
-
-void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
-{
- *((u32*)dt->dt.next) = value;
- dt->dt.next += sizeof(u32);
-
- dt_check_blob(&dt->dt);
-}
-
-void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
-{
- *((u64*)dt->dt.next) = value;
- dt->dt.next += sizeof(u64);
-
- dt_check_blob(&dt->dt);
-}
-
-unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
-{
- unsigned long start = blob->next - (unsigned long)blob->data;
-
- memcpy((char *)blob->next, data, len);
- blob->next = _ALIGN(blob->next + len, 4);
-
- dt_check_blob(blob);
-
- return start;
-}
-
-void dt_start_node(struct iseries_flat_dt *dt, char *name)
-{
- dt_push_u32(dt, OF_DT_BEGIN_NODE);
- dt_push_bytes(&dt->dt, name, strlen(name) + 1);
-}
-
-#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
-
-void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
-{
- unsigned long offset;
-
- dt_push_u32(dt, OF_DT_PROP);
-
- /* Length of the data */
- dt_push_u32(dt, len);
-
- /* Put the property name in the string blob. */
- offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
-
- /* The offset of the properties name in the string blob. */
- dt_push_u32(dt, (u32)offset);
-
- /* The actual data. */
- dt_push_bytes(&dt->dt, data, len);
-}
-
-void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
-{
- dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for NULL */
-}
-
-void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
-{
- dt_prop(dt, name, (char *)&data, sizeof(u32));
-}
-
-void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
-{
- dt_prop(dt, name, (char *)&data, sizeof(u64));
-}
-
-void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
-{
- dt_prop(dt, name, (char *)data, sizeof(u64) * n);
-}
-
-void dt_prop_u32_list(struct iseries_flat_dt *dt, char *name, u32 *data, int n)
-{
- dt_prop(dt, name, (char *)data, sizeof(u32) * n);
-}
-
-void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
-{
- dt_prop(dt, name, NULL, 0);
-}
-
-void dt_cpus(struct iseries_flat_dt *dt)
-{
- unsigned char buf[32];
- unsigned char *p;
- unsigned int i, index;
- struct IoHriProcessorVpd *d;
- u32 pft_size[2];
-
- /* yuck */
- snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
- p = strchr(buf, ' ');
- if (!p) p = buf + strlen(buf);
-
- dt_start_node(dt, "cpus");
- dt_prop_u32(dt, "#address-cells", 1);
- dt_prop_u32(dt, "#size-cells", 0);
-
- pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
- pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
-
- for (i = 0; i < NR_CPUS; i++) {
- if (lppaca[i].dyn_proc_status >= 2)
- continue;
-
- snprintf(p, 32 - (p - buf), "@%d", i);
- dt_start_node(dt, buf);
-
- dt_prop_str(dt, "device_type", "cpu");
-
- index = lppaca[i].dyn_hv_phys_proc_index;
- d = &xIoHriProcessorVpd[index];
-
- dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
- dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
-
- dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
- dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
-
- /* magic conversions to Hz copied from old code */
- dt_prop_u32(dt, "clock-frequency",
- ((1UL << 34) * 1000000) / d->xProcFreq);
- dt_prop_u32(dt, "timebase-frequency",
- ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
-
- dt_prop_u32(dt, "reg", i);
-
- dt_prop_u32_list(dt, "ibm,pft-size", pft_size, 2);
-
- dt_end_node(dt);
- }
-
- dt_end_node(dt);
-}
-
-void dt_model(struct iseries_flat_dt *dt)
-{
- char buf[16] = "IBM,";
-
- /* "IBM," + mfgId[2:3] + systemSerial[1:5] */
- strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
- strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
- buf[11] = '\0';
- dt_prop_str(dt, "system-id", buf);
-
- /* "IBM," + machineType[0:4] */
- strne2a(buf + 4, xItExtVpdPanel.machineType, 4);
- buf[8] = '\0';
- dt_prop_str(dt, "model", buf);
-
- dt_prop_str(dt, "compatible", "IBM,iSeries");
-}
-
-void build_flat_dt(struct iseries_flat_dt *dt, unsigned long phys_mem_size)
-{
- u64 tmp[2];
-
- dt_init(dt);
-
- dt_start_node(dt, "");
-
- dt_prop_u32(dt, "#address-cells", 2);
- dt_prop_u32(dt, "#size-cells", 2);
- dt_model(dt);
-
- /* /memory */
- dt_start_node(dt, "memory@0");
- dt_prop_str(dt, "name", "memory");
- dt_prop_str(dt, "device_type", "memory");
- tmp[0] = 0;
- tmp[1] = phys_mem_size;
- dt_prop_u64_list(dt, "reg", tmp, 2);
- dt_end_node(dt);
-
- /* /chosen */
- dt_start_node(dt, "chosen");
- dt_prop_str(dt, "bootargs", cmd_line);
- if (cmd_mem_limit)
- dt_prop_u64(dt, "linux,memory-limit", cmd_mem_limit);
- dt_end_node(dt);
-
- dt_cpus(dt);
-
- dt_end_node(dt);
-
- dt_push_u32(dt, OF_DT_END);
-}
-
void * __init iSeries_early_setup(void)
{
unsigned long phys_mem_size;
@@ -965,28 +720,8 @@ void * __init iSeries_early_setup(void)
iSeries_get_cmdline();
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
-
- /* Parse early parameters, in particular mem=x */
- parse_early_param();
-
- build_flat_dt(&iseries_dt, phys_mem_size);
-
- return (void *) __pa(&iseries_dt);
-}
-
-/*
- * On iSeries we just parse the mem=X option from the command line.
- * On pSeries it's a bit more complicated, see prom_init_mem()
- */
-static int __init early_parsemem(char *p)
-{
- if (p)
- cmd_mem_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
- return 0;
+ return (void *) __pa(build_flat_dt(phys_mem_size));
}
-early_param("mem", early_parsemem);
static void hvputc(char c)
{
diff --git a/arch/powerpc/platforms/iseries/setup.h b/arch/powerpc/platforms/iseries/setup.h
index 5213044ec411..0a47ac53c959 100644
--- a/arch/powerpc/platforms/iseries/setup.h
+++ b/arch/powerpc/platforms/iseries/setup.h
@@ -21,4 +21,6 @@ extern unsigned long iSeries_get_boot_time(void);
extern int iSeries_set_rtc_time(struct rtc_time *tm);
extern void iSeries_get_rtc_time(struct rtc_time *tm);
+extern void *build_flat_dt(unsigned long phys_mem_size);
+
#endif /* __ISERIES_SETUP_H__ */
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
deleted file mode 100644
index ad36ab0639f0..000000000000
--- a/arch/powerpc/platforms/iseries/vio.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * IBM PowerPC iSeries Virtual I/O Infrastructure Support.
- *
- * Copyright (c) 2005 Stephen Rothwell, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/init.h>
-
-#include <asm/vio.h>
-#include <asm/iommu.h>
-#include <asm/tce.h>
-#include <asm/abs_addr.h>
-#include <asm/page.h>
-#include <asm/iseries/vio.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_call_xm.h>
-
-#include "iommu.h"
-
-struct device *iSeries_vio_dev = &vio_bus_device.dev;
-EXPORT_SYMBOL(iSeries_vio_dev);
-
-static struct iommu_table veth_iommu_table;
-static struct iommu_table vio_iommu_table;
-
-static void __init iommu_vio_init(void)
-{
- iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
- veth_iommu_table.it_size /= 2;
- vio_iommu_table = veth_iommu_table;
- vio_iommu_table.it_offset += veth_iommu_table.it_size;
-
- if (!iommu_init_table(&veth_iommu_table))
- printk("Virtual Bus VETH TCE table failed.\n");
- if (!iommu_init_table(&vio_iommu_table))
- printk("Virtual Bus VIO TCE table failed.\n");
-}
-
-/**
- * vio_register_device_iseries: - Register a new iSeries vio device.
- * @voidev: The device to register.
- */
-static struct vio_dev *__init vio_register_device_iseries(char *type,
- uint32_t unit_num)
-{
- struct vio_dev *viodev;
-
- /* allocate a vio_dev for this device */
- viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
- if (!viodev)
- return NULL;
- memset(viodev, 0, sizeof(struct vio_dev));
-
- snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num);
-
- viodev->name = viodev->dev.bus_id;
- viodev->type = type;
- viodev->unit_address = unit_num;
- viodev->iommu_table = &vio_iommu_table;
- if (vio_register_device(viodev) == NULL) {
- kfree(viodev);
- return NULL;
- }
- return viodev;
-}
-
-void __init probe_bus_iseries(void)
-{
- HvLpIndexMap vlan_map;
- struct vio_dev *viodev;
- int i;
-
- /* there is only one of each of these */
- vio_register_device_iseries("viocons", 0);
- vio_register_device_iseries("vscsi", 0);
-
- vlan_map = HvLpConfig_getVirtualLanIndexMap();
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
- if ((vlan_map & (0x8000 >> i)) == 0)
- continue;
- viodev = vio_register_device_iseries("vlan", i);
- /* veth is special and has it own iommu_table */
- viodev->iommu_table = &veth_iommu_table;
- }
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
- vio_register_device_iseries("viodasd", i);
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
- vio_register_device_iseries("viocd", i);
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
- vio_register_device_iseries("viotape", i);
-}
-
-/**
- * vio_match_device_iseries: - Tell if a iSeries VIO device matches a
- * vio_device_id
- */
-static int vio_match_device_iseries(const struct vio_device_id *id,
- const struct vio_dev *dev)
-{
- return strncmp(dev->type, id->type, strlen(id->type)) == 0;
-}
-
-static struct vio_bus_ops vio_bus_ops_iseries = {
- .match = vio_match_device_iseries,
-};
-
-/**
- * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus
- */
-static int __init vio_bus_init_iseries(void)
-{
- int err;
-
- err = vio_bus_init(&vio_bus_ops_iseries);
- if (err == 0) {
- iommu_vio_init();
- vio_bus_device.iommu_table = &vio_iommu_table;
- iSeries_vio_dev = &vio_bus_device.dev;
- probe_bus_iseries();
- }
- return err;
-}
-
-__initcall(vio_bus_init_iseries);
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 85d6c93659cc..9a4efc0c3b29 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -437,9 +437,6 @@ void __init maple_pci_init(void)
/* Tell pci.c to not change any resource allocations. */
pci_probe_only = 1;
-
- /* Allow all IO */
- io_page_mask = -1;
}
int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 24c0aef4ea39..a0505ea48a86 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -189,7 +189,7 @@ void __init maple_setup_arch(void)
conswitchp = &dummy_con;
#endif
- printk(KERN_INFO "Using native/NAP idle loop\n");
+ printk(KERN_DEBUG "Using native/NAP idle loop\n");
}
/*
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index cfd6527a0d7e..af2a8f9f1222 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -314,7 +314,7 @@ static int pmu_set_cpu_speed(int low_speed)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
- set_context(current->active_mm->context, current->active_mm->pgd);
+ set_context(current->active_mm->context.id, current->active_mm->pgd);
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index a5063cd675c5..85e00cb0006e 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2510,7 +2510,7 @@ found:
if (get_property(np, "flush-on-lock", NULL))
break;
powersave_nap = 1;
- printk(KERN_INFO "Processor NAP mode on idle enabled.\n");
+ printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n");
break;
}
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index ea179afea632..80035853467b 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1068,9 +1068,6 @@ void __init pmac_pci_init(void)
/* Tell pci.c to not use the common resource allocation mechanism */
pci_probe_only = 1;
- /* Allow all IO */
- io_page_mask = -1;
-
#else /* CONFIG_PPC64 */
init_p2pbridge();
fixup_nec_usb2();
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index f08173b0f065..047f954a89eb 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -871,10 +871,17 @@ int pmf_register_irq_client(struct device_node *target,
spin_unlock_irqrestore(&pmf_lock, flags);
if (func == NULL)
return -ENODEV;
+
+ /* guard against manipulations of list */
mutex_lock(&pmf_irq_mutex);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_enable(func);
+
+ /* guard against pmf_do_irq while changing list */
+ spin_lock_irqsave(&pmf_lock, flags);
list_add(&client->link, &func->irq_clients);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
client->func = func;
mutex_unlock(&pmf_irq_mutex);
@@ -885,12 +892,19 @@ EXPORT_SYMBOL_GPL(pmf_register_irq_client);
void pmf_unregister_irq_client(struct pmf_irq_client *client)
{
struct pmf_function *func = client->func;
+ unsigned long flags;
BUG_ON(func == NULL);
+ /* guard against manipulations of list */
mutex_lock(&pmf_irq_mutex);
client->func = NULL;
+
+ /* guard against pmf_do_irq while changing list */
+ spin_lock_irqsave(&pmf_lock, flags);
list_del(&client->link);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_disable(func);
mutex_unlock(&pmf_irq_mutex);
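
The two comments added above describe a two-level locking scheme: pmf_irq_mutex serializes register/unregister against each other, while the pmf_lock spinlock is taken around the actual list_add()/list_del() so the interrupt path that walks irq_clients never observes a half-updated list. A minimal userspace analogue of that discipline, using pthread primitives instead of the kernel's mutex/spinlock (all names below are illustrative, not from the patch):

    #include <pthread.h>
    #include <stdio.h>

    /* Toy client list protected by two locks, mirroring the pattern above:
     * - reg_mutex serializes the (slow) register/unregister paths,
     * - list_lock is the only lock the (fast) "interrupt" path takes. */
    struct client { struct client *next; int id; };

    static struct client *clients;
    static pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_spinlock_t list_lock;

    static void register_client(struct client *c)
    {
            pthread_mutex_lock(&reg_mutex);    /* serialize registrations */
            pthread_spin_lock(&list_lock);     /* exclude the fast path */
            c->next = clients;
            clients = c;
            pthread_spin_unlock(&list_lock);
            pthread_mutex_unlock(&reg_mutex);
    }

    static void fast_path(void)                /* stands in for pmf_do_irq() */
    {
            pthread_spin_lock(&list_lock);
            for (struct client *c = clients; c; c = c->next)
                    printf("client %d\n", c->id);
            pthread_spin_unlock(&list_lock);
    }

    int main(void)
    {
            struct client a = { .id = 1 };

            pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
            register_client(&a);
            fast_path();
            return 0;
    }
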
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index b9200fb07815..9cc7db7a8bdc 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -458,7 +458,7 @@ static int pmac_pm_finish(suspend_state_t state)
printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
/* Restore userland MMU context */
- set_context(current->active_mm->context, current->active_mm->pgd);
+ set_context(current->active_mm->context.id, current->active_mm->pgd);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 930898635c9f..e5e0ff466904 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,8 +1,11 @@
+ifeq ($(CONFIG_PPC64),y)
+EXTRA_CFLAGS += -mno-minimal-toc
+endif
+
obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o ras.o rtasd.o pci_dlpar.o \
firmware.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index d4a402c5866c..98c23aec85be 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -304,6 +304,8 @@ void __init pci_addr_cache_build(void)
pci_addr_cache_insert_device(dev);
dn = pci_device_to_OF_node(dev);
+ if (!dn)
+ continue;
pci_dev_get (dev); /* matching put is in eeh_remove_device() */
PCI_DN(dn)->pcidev = dev;
}
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 1fba695e32e8..0ec9a5445b95 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -23,9 +23,8 @@
*
*/
#include <linux/delay.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/notifier.h>
+#include <linux/irq.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
@@ -202,7 +201,11 @@ static void eeh_report_failure(struct pci_dev *dev, void *userdata)
static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
{
- int rc;
+ int cnt, rc;
+
+ /* pcibios will clear the counter; save the value */
+ cnt = pe_dn->eeh_freeze_count;
+
if (bus)
pcibios_remove_pci_devices(bus);
@@ -241,6 +244,7 @@ static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
ssleep (5);
pcibios_add_pci_devices(bus);
}
+ pe_dn->eeh_freeze_count = cnt;
return 0;
}
@@ -250,23 +254,29 @@ static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
*/
#define MAX_WAIT_FOR_RECOVERY 15
-void handle_eeh_events (struct eeh_event *event)
+struct pci_dn * handle_eeh_events (struct eeh_event *event)
{
struct device_node *frozen_dn;
struct pci_dn *frozen_pdn;
struct pci_bus *frozen_bus;
int rc = 0;
enum pci_ers_result result = PCI_ERS_RESULT_NONE;
- const char *pci_str, *drv_str;
+ const char *location, *pci_str, *drv_str;
frozen_dn = find_device_pe(event->dn);
frozen_bus = pcibios_find_pci_bus(frozen_dn);
if (!frozen_dn) {
- printk(KERN_ERR "EEH: Error: Cannot find partition endpoint for %s\n",
- pci_name(event->dev));
- return;
+
+ location = (char *) get_property(event->dn, "ibm,loc-code", NULL);
+ location = location ? location : "unknown";
+ printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
+ "for location=%s pci addr=%s\n",
+ location, pci_name(event->dev));
+ return NULL;
}
+ location = (char *) get_property(frozen_dn, "ibm,loc-code", NULL);
+ location = location ? location : "unknown";
/* There are two different styles for coming up with the PE.
* In the old style, it was the highest EEH-capable device
@@ -278,9 +288,10 @@ void handle_eeh_events (struct eeh_event *event)
frozen_bus = pcibios_find_pci_bus (frozen_dn->parent);
if (!frozen_bus) {
- printk(KERN_ERR "EEH: Cannot find PCI bus for %s\n",
- frozen_dn->full_name);
- return;
+ printk(KERN_ERR "EEH: Cannot find PCI bus "
+ "for location=%s dn=%s\n",
+ location, frozen_dn->full_name);
+ return NULL;
}
#if 0
@@ -314,8 +325,9 @@ void handle_eeh_events (struct eeh_event *event)
eeh_slot_error_detail(frozen_pdn, 1 /* Temporary Error */);
printk(KERN_WARNING
- "EEH: This PCI device has failed %d times since last reboot: %s - %s\n",
- frozen_pdn->eeh_freeze_count, drv_str, pci_str);
+ "EEH: This PCI device has failed %d times since last reboot: "
+ "location=%s driver=%s pci addr=%s\n",
+ frozen_pdn->eeh_freeze_count, location, drv_str, pci_str);
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
@@ -355,7 +367,7 @@ void handle_eeh_events (struct eeh_event *event)
/* Tell all device drivers that they can resume operations */
pci_walk_bus(frozen_bus, eeh_report_resume, NULL);
- return;
+ return frozen_pdn;
excess_failures:
/*
@@ -364,17 +376,18 @@ excess_failures:
* due to actual, failed cards.
*/
printk(KERN_ERR
- "EEH: PCI device %s - %s has failed %d times \n"
- "and has been permanently disabled. Please try reseating\n"
- "this device or replacing it.\n",
- drv_str, pci_str, frozen_pdn->eeh_freeze_count);
+ "EEH: PCI device at location=%s driver=%s pci addr=%s \n"
+ "has failed %d times and has been permanently disabled. \n"
+ "Please try reseating this device or replacing it.\n",
+ location, drv_str, pci_str, frozen_pdn->eeh_freeze_count);
goto perm_error;
hard_fail:
printk(KERN_ERR
- "EEH: Unable to recover from failure of PCI device %s - %s\n"
+ "EEH: Unable to recover from failure of PCI device "
+ "at location=%s driver=%s pci addr=%s \n"
"Please try reseating this device or replacing it.\n",
- drv_str, pci_str);
+ location, drv_str, pci_str);
perm_error:
eeh_slot_error_detail(frozen_pdn, 2 /* Permanent Error */);
@@ -384,6 +397,8 @@ perm_error:
/* Shut down the device drivers for good. */
pcibios_remove_pci_devices(frozen_bus);
+
+ return NULL;
}
/* ---------- end of file ---------- */
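
The cnt save/restore added to eeh_reset_device() above exists because removing and re-adding the PCI devices resets per-device state, including the freeze counter that the "permanently disable after too many failures" policy relies on. A small standalone C model of that idea; the struct, threshold and function names are made up for illustration:

    #include <stdio.h>

    #define MAX_FREEZES 6                  /* illustrative threshold */

    struct fake_dev { int freeze_count; };

    /* Models pcibios_remove/add_pci_devices(): re-adding starts from scratch,
     * so any state kept with the device is lost. */
    static void reset_bus(struct fake_dev *dev)
    {
            dev->freeze_count = 0;
    }

    /* Models eeh_reset_device(): preserve the counter across the reset so the
     * caller can still decide when a slot has failed too often. */
    static void reset_device(struct fake_dev *dev)
    {
            int cnt = dev->freeze_count;   /* reset will clear it; save the value */

            reset_bus(dev);
            dev->freeze_count = cnt;
    }

    int main(void)
    {
            struct fake_dev dev = { .freeze_count = 3 };

            reset_device(&dev);
            printf("failures so far: %d (limit %d)\n",
                   dev.freeze_count, MAX_FREEZES);
            return 0;
    }
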
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 40020c65c89e..8f2d12935b99 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -18,6 +18,7 @@
* Copyright (c) 2005 Linas Vepstas <linas@linas.org>
*/
+#include <linux/delay.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -56,38 +57,43 @@ static int eeh_event_handler(void * dummy)
{
unsigned long flags;
struct eeh_event *event;
+ struct pci_dn *pdn;
daemonize ("eehd");
+ set_current_state(TASK_INTERRUPTIBLE);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&eeh_eventlist_lock, flags);
+ event = NULL;
- spin_lock_irqsave(&eeh_eventlist_lock, flags);
- event = NULL;
+ /* Unqueue the event, get ready to process. */
+ if (!list_empty(&eeh_eventlist)) {
+ event = list_entry(eeh_eventlist.next, struct eeh_event, list);
+ list_del(&event->list);
+ }
+ spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
- /* Unqueue the event, get ready to process. */
- if (!list_empty(&eeh_eventlist)) {
- event = list_entry(eeh_eventlist.next, struct eeh_event, list);
- list_del(&event->list);
- }
- spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
+ if (event == NULL)
+ return 0;
- if (event == NULL)
- break;
+ /* Serialize processing of EEH events */
+ mutex_lock(&eeh_event_mutex);
+ eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
- /* Serialize processing of EEH events */
- mutex_lock(&eeh_event_mutex);
- eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
+ printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
+ pci_name(event->dev));
- printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
- pci_name(event->dev));
+ pdn = handle_eeh_events(event);
- handle_eeh_events(event);
+ eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
+ pci_dev_put(event->dev);
+ kfree(event);
+ mutex_unlock(&eeh_event_mutex);
- eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
- pci_dev_put(event->dev);
- kfree(event);
- mutex_unlock(&eeh_event_mutex);
+ /* If there are no new errors after an hour, clear the counter. */
+ if (pdn && pdn->eeh_freeze_count>0) {
+ msleep_interruptible (3600*1000);
+ if (pdn->eeh_freeze_count>0)
+ pdn->eeh_freeze_count--;
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 2643078433f0..d03a8b078f9d 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1,23 +1,24 @@
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
- * Rewrite, cleanup:
+ * Rewrite, cleanup:
*
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
*
* Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
*
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -49,52 +50,46 @@
#define DBG(fmt...)
-static void tce_build_pSeries(struct iommu_table *tbl, long index,
- long npages, unsigned long uaddr,
+static void tce_build_pSeries(struct iommu_table *tbl, long index,
+ long npages, unsigned long uaddr,
enum dma_data_direction direction)
{
- union tce_entry t;
- union tce_entry *tp;
+ u64 proto_tce;
+ u64 *tcep;
+ u64 rpn;
index <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
- t.te_word = 0;
- t.te_rdwr = 1; // Read allowed
+ proto_tce = TCE_PCI_READ; // Read allowed
if (direction != DMA_TO_DEVICE)
- t.te_pciwr = 1;
+ proto_tce |= TCE_PCI_WRITE;
- tp = ((union tce_entry *)tbl->it_base) + index;
+ tcep = ((u64 *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross LMB boundary */
- t.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
-
- tp->te_word = t.te_word;
+ rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
+ *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
uaddr += TCE_PAGE_SIZE;
- tp++;
+ tcep++;
}
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
- union tce_entry t;
- union tce_entry *tp;
+ u64 *tcep;
npages <<= TCE_PAGE_FACTOR;
index <<= TCE_PAGE_FACTOR;
- t.te_word = 0;
- tp = ((union tce_entry *)tbl->it_base) + index;
-
- while (npages--) {
- tp->te_word = t.te_word;
-
- tp++;
- }
+ tcep = ((u64 *)tbl->it_base) + index;
+
+ while (npages--)
+ *(tcep++) = 0;
}
@@ -103,43 +98,44 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
enum dma_data_direction direction)
{
u64 rc;
- union tce_entry tce;
+ u64 proto_tce, tce;
+ u64 rpn;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
- tce.te_word = 0;
- tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
- tce.te_rdwr = 1;
+ rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
+ proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
- tce.te_pciwr = 1;
+ proto_tce |= TCE_PCI_WRITE;
while (npages--) {
- rc = plpar_tce_put((u64)tbl->it_index,
- (u64)tcenum << 12,
- tce.te_word );
-
+ tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+ rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
+
if (rc && printk_ratelimit()) {
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%lx\n", (u64)tcenum);
- printk("\ttce val = 0x%lx\n", tce.te_word );
+ printk("\ttce val = 0x%lx\n", tce );
show_stack(current, (unsigned long *)__get_SP());
}
-
+
tcenum++;
- tce.te_rpn++;
+ rpn++;
}
}
-static DEFINE_PER_CPU(void *, tce_page) = NULL;
+static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction)
{
u64 rc;
- union tce_entry tce, *tcep;
+ u64 proto_tce;
+ u64 *tcep;
+ u64 rpn;
long l, limit;
if (TCE_PAGE_FACTOR == 0 && npages == 1)
@@ -152,7 +148,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
* from iommu_alloc{,_sg}()
*/
if (!tcep) {
- tcep = (void *)__get_free_page(GFP_ATOMIC);
+ tcep = (u64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep)
return tce_build_pSeriesLP(tbl, tcenum, npages,
@@ -163,11 +159,10 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
- tce.te_word = 0;
- tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
- tce.te_rdwr = 1;
+ rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
+ proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
- tce.te_pciwr = 1;
+ proto_tce |= TCE_PCI_WRITE;
/* We can map max one pageful of TCEs at a time */
do {
@@ -175,11 +170,11 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
* Set up the page with TCE data, looping through and setting
* the values.
*/
- limit = min_t(long, npages, 4096/sizeof(union tce_entry));
+ limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
for (l = 0; l < limit; l++) {
- tcep[l] = tce;
- tce.te_rpn++;
+ tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+ rpn++;
}
rc = plpar_tce_put_indirect((u64)tbl->it_index,
@@ -195,7 +190,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
printk("\tnpages = 0x%lx\n", (u64)npages);
- printk("\ttce[0] val = 0x%lx\n", tcep[0].te_word);
+ printk("\ttce[0] val = 0x%lx\n", tcep[0]);
show_stack(current, (unsigned long *)__get_SP());
}
}
@@ -203,23 +198,17 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
u64 rc;
- union tce_entry tce;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
- tce.te_word = 0;
-
while (npages--) {
- rc = plpar_tce_put((u64)tbl->it_index,
- (u64)tcenum << 12,
- tce.te_word);
+ rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
if (rc && printk_ratelimit()) {
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%lx\n", (u64)tcenum);
- printk("\ttce val = 0x%lx\n", tce.te_word );
show_stack(current, (unsigned long *)__get_SP());
}
@@ -231,31 +220,24 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
u64 rc;
- union tce_entry tce;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
- tce.te_word = 0;
-
- rc = plpar_tce_stuff((u64)tbl->it_index,
- (u64)tcenum << 12,
- tce.te_word,
- npages);
+ rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
if (rc && printk_ratelimit()) {
printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
printk("\trc = %ld\n", rc);
printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
printk("\tnpages = 0x%lx\n", (u64)npages);
- printk("\ttce val = 0x%lx\n", tce.te_word );
show_stack(current, (unsigned long *)__get_SP());
}
}
static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn,
- struct iommu_table *tbl)
+ struct iommu_table *tbl)
{
struct device_node *node;
unsigned long *basep;
@@ -275,16 +257,16 @@ static void iommu_table_setparms(struct pci_controller *phb,
memset((void *)tbl->it_base, 0, *sizep);
tbl->it_busno = phb->bus->number;
-
+
/* Units of tce entries */
tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT;
-
+
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
- panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
+ panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
}
-
+
phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */
@@ -299,30 +281,22 @@ static void iommu_table_setparms(struct pci_controller *phb,
* iommu_table_setparms_lpar
*
* Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
- *
- * ToDo: properly interpret the ibm,dma-window property. The definition is:
- * logical-bus-number (1 word)
- * phys-address (#address-cells words)
- * size (#cell-size words)
- *
- * Currently we hard code these sizes (more or less).
*/
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
- unsigned int *dma_window)
+ unsigned char *dma_window)
{
+ unsigned long offset, size;
+
tbl->it_busno = PCI_DN(dn)->bussubno;
+ of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
- /* TODO: Parse field size properties properly. */
- tbl->it_size = (((unsigned long)dma_window[4] << 32) |
- (unsigned long)dma_window[5]) >> PAGE_SHIFT;
- tbl->it_offset = (((unsigned long)dma_window[2] << 32) |
- (unsigned long)dma_window[3]) >> PAGE_SHIFT;
tbl->it_base = 0;
- tbl->it_index = dma_window[0];
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
+ tbl->it_offset = offset >> PAGE_SHIFT;
+ tbl->it_size = size >> PAGE_SHIFT;
}
static void iommu_bus_setup_pSeries(struct pci_bus *bus)
@@ -357,13 +331,9 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
if (isa_dn_orig)
of_node_put(isa_dn_orig);
- /* Count number of direct PCI children of the PHB.
- * All PCI device nodes have class-code property, so it's
- * an easy way to find them.
- */
+ /* Count number of direct PCI children of the PHB. */
for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
- if (get_property(tmp, "class-code", NULL))
- children++;
+ children++;
DBG("Children: %d\n", children);
@@ -394,10 +364,11 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
pci->phb->dma_window_size = 0x8000000ul;
pci->phb->dma_window_base_cur = 0x8000000ul;
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ pci->phb->node);
iommu_table_setparms(pci->phb, dn, tbl);
- pci->iommu_table = iommu_init_table(tbl);
+ pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
@@ -414,7 +385,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
- unsigned int *dma_window = NULL;
+ unsigned char *dma_window = NULL;
DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
@@ -422,7 +393,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
/* Find nearest ibm,dma-window, walking up the device tree */
for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
- dma_window = (unsigned int *)get_property(pdn, "ibm,dma-window", NULL);
+ dma_window = get_property(pdn, "ibm,dma-window", NULL);
if (dma_window != NULL)
break;
}
@@ -440,12 +411,12 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
ppci->bussubno = bus->number;
- tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
- GFP_KERNEL);
-
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ ppci->phb->node);
+
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
- ppci->iommu_table = iommu_init_table(tbl);
+ ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
}
if (pdn != dn)
@@ -468,9 +439,11 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
*/
if (!dev->bus->self) {
DBG(" --> first child, no bridge. Allocating iommu table.\n");
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ PCI_DN(dn)->phb->node);
iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
- PCI_DN(mydn)->iommu_table = iommu_init_table(tbl);
+ PCI_DN(dn)->iommu_table = iommu_init_table(tbl,
+ PCI_DN(dn)->phb->node);
return;
}
@@ -516,7 +489,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
- int *dma_window = NULL;
+ unsigned char *dma_window = NULL;
struct pci_dn *pci;
DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));
@@ -531,8 +504,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
pdn = pdn->parent) {
- dma_window = (unsigned int *)
- get_property(pdn, "ibm,dma-window", NULL);
+ dma_window = get_property(pdn, "ibm,dma-window", NULL);
if (dma_window)
break;
}
@@ -553,12 +525,12 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
/* iommu_table_setparms_lpar needs bussubno. */
pci->bussubno = pci->phb->bus->number;
- tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
- GFP_KERNEL);
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ pci->phb->node);
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
- pci->iommu_table = iommu_init_table(tbl);
+ pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
}
if (pdn != dn)
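
The iommu.c rewrite above drops the old tce_entry bitfield union in favour of plain 64-bit arithmetic: a prototype TCE carries the permission bits, and each entry is the prototype OR'd with the buffer's real page number shifted into place. A standalone sketch of that encoding; the constant values below are assumptions matching a 4K I/O page layout, the authoritative definitions live in asm/tce.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed constants (see asm/tce.h in the kernel tree for the real ones). */
    #define TCE_SHIFT      12                 /* 4K I/O pages */
    #define TCE_RPN_SHIFT  12
    #define TCE_RPN_MASK   0xfffffffffful     /* 40-bit real page number */
    #define TCE_PCI_READ   0x1
    #define TCE_PCI_WRITE  0x2

    /* Build one TCE the way tce_build_pSeries()/tce_build_pSeriesLP() now do:
     * start from a prototype holding only the permission bits, then merge in
     * the real page number of the buffer being mapped. */
    static uint64_t build_tce(uint64_t phys_addr, int writable)
    {
            uint64_t proto_tce = TCE_PCI_READ;   /* reads always allowed */
            uint64_t rpn = phys_addr >> TCE_SHIFT;

            if (writable)
                    proto_tce |= TCE_PCI_WRITE;

            return proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
    }

    int main(void)
    {
            /* Map a (made-up) physical address for bidirectional DMA. */
            printf("tce = 0x%016llx\n",
                   (unsigned long long)build_tce(0x12345000ull, 1));
            return 0;
    }
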
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index e0000ce769e5..2e4e04042d85 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -348,7 +348,7 @@ static int enable_surveillance(int timeout)
return 0;
if (error == -EINVAL) {
- printk(KERN_INFO "rtasd: surveillance not supported\n");
+ printk(KERN_DEBUG "rtasd: surveillance not supported\n");
return 0;
}
@@ -440,7 +440,7 @@ static int rtasd(void *unused)
goto error;
}
- printk(KERN_INFO "RTAS daemon started\n");
+ printk(KERN_DEBUG "RTAS daemon started\n");
DEBUG("will sleep for %d milliseconds\n", (30000/rtas_event_scan_rate));
@@ -487,7 +487,7 @@ static int __init rtas_init(void)
/* No RTAS */
if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
- printk(KERN_INFO "rtasd: no event-scan on system\n");
+ printk(KERN_DEBUG "rtasd: no event-scan on system\n");
return -ENODEV;
}
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 50643496eb63..77a5bb1d9c30 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -107,9 +107,9 @@ static ssize_t scanlog_read(struct file *file, char __user *buf,
/* Break to sleep default time */
break;
default:
- if (status > 9900 && status <= 9905) {
- wait_time = rtas_extended_busy_delay_time(status);
- } else {
+ /* Assume extended busy */
+ wait_time = rtas_busy_delay_time(status);
+ if (!wait_time) {
printk(KERN_ERR "scanlog: unknown error from rtas: %d\n", status);
return -EIO;
}
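
The scanlog change above replaces the open-coded 9900..9905 status test with rtas_busy_delay_time(), which turns an RTAS busy or extended-delay status into a number of milliseconds to wait and returns 0 for anything else. A rough userspace model of that helper; the exact mapping used here (status 9900+n meaning a 10^n millisecond delay) is an assumption based on the conventional RTAS extended-delay encoding, not taken from this patch:

    #include <stdio.h>

    #define RTAS_BUSY (-2)                 /* assumed value of the plain-busy status */

    /* Rough model of rtas_busy_delay_time(): return how many milliseconds the
     * caller should wait before retrying, or 0 if the status is not a busy code. */
    static unsigned int busy_delay_time(int status)
    {
            unsigned int ms = 0;

            if (status == RTAS_BUSY) {
                    ms = 1;
            } else if (status >= 9900 && status <= 9905) {
                    /* extended delay: 9900 -> 1 ms, 9901 -> 10 ms, ... 9905 -> 100 s */
                    ms = 1;
                    for (int order = status - 9900; order > 0; order--)
                            ms *= 10;
            }
            return ms;
    }

    int main(void)
    {
            int sample[] = { -2, 9900, 9903, 9905, -1 };

            for (unsigned i = 0; i < sizeof(sample) / sizeof(sample[0]); i++)
                    printf("status %5d -> wait %u ms%s\n", sample[i],
                           busy_delay_time(sample[i]),
                           busy_delay_time(sample[i]) ? "" : " (treat as error)");
            return 0;
    }
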
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 3ba87835757e..1e28518c6121 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -235,14 +235,14 @@ static void __init pSeries_setup_arch(void)
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
vpa_init(boot_cpuid);
if (get_lppaca()->shared_proc) {
- printk(KERN_INFO "Using shared processor idle loop\n");
+ printk(KERN_DEBUG "Using shared processor idle loop\n");
ppc_md.power_save = pseries_shared_idle_sleep;
} else {
- printk(KERN_INFO "Using dedicated idle loop\n");
+ printk(KERN_DEBUG "Using dedicated idle loop\n");
ppc_md.power_save = pseries_dedicated_idle_sleep;
}
} else {
- printk(KERN_INFO "Using default idle loop\n");
+ printk(KERN_DEBUG "Using default idle loop\n");
}
if (firmware_has_feature(FW_FEATURE_LPAR))
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
deleted file mode 100644
index 8e53e04ada8b..000000000000
--- a/arch/powerpc/platforms/pseries/vio.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * IBM PowerPC pSeries Virtual I/O Infrastructure Support.
- *
- * Copyright (c) 2003-2005 IBM Corp.
- * Dave Engebretsen engebret@us.ibm.com
- * Santiago Leon santil@us.ibm.com
- * Hollis Blanchard <hollisb@us.ibm.com>
- * Stephen Rothwell
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/kobject.h>
-#include <asm/iommu.h>
-#include <asm/dma.h>
-#include <asm/prom.h>
-#include <asm/vio.h>
-#include <asm/hvcall.h>
-#include <asm/tce.h>
-
-extern struct subsystem devices_subsys; /* needed for vio_find_name() */
-
-static void probe_bus_pseries(void)
-{
- struct device_node *node_vroot, *of_node;
-
- node_vroot = find_devices("vdevice");
- if ((node_vroot == NULL) || (node_vroot->child == NULL))
- /* this machine doesn't do virtual IO, and that's ok */
- return;
-
- /*
- * Create struct vio_devices for each virtual device in the device tree.
- * Drivers will associate with them later.
- */
- for (of_node = node_vroot->child; of_node != NULL;
- of_node = of_node->sibling) {
- printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node);
- vio_register_device_node(of_node);
- }
-}
-
-/**
- * vio_match_device_pseries: - Tell if a pSeries VIO device matches a
- * vio_device_id
- */
-static int vio_match_device_pseries(const struct vio_device_id *id,
- const struct vio_dev *dev)
-{
- return (strncmp(dev->type, id->type, strlen(id->type)) == 0) &&
- device_is_compatible(dev->dev.platform_data, id->compat);
-}
-
-static void vio_release_device_pseries(struct device *dev)
-{
- /* XXX free TCE table */
- of_node_put(dev->platform_data);
-}
-
-static ssize_t viodev_show_devspec(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct device_node *of_node = dev->platform_data;
-
- return sprintf(buf, "%s\n", of_node->full_name);
-}
-DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL);
-
-static void vio_unregister_device_pseries(struct vio_dev *viodev)
-{
- device_remove_file(&viodev->dev, &dev_attr_devspec);
-}
-
-static struct vio_bus_ops vio_bus_ops_pseries = {
- .match = vio_match_device_pseries,
- .unregister_device = vio_unregister_device_pseries,
- .release_device = vio_release_device_pseries,
-};
-
-/**
- * vio_bus_init_pseries: - Initialize the pSeries virtual IO bus
- */
-static int __init vio_bus_init_pseries(void)
-{
- int err;
-
- err = vio_bus_init(&vio_bus_ops_pseries);
- if (err == 0)
- probe_bus_pseries();
- return err;
-}
-
-__initcall(vio_bus_init_pseries);
-
-/**
- * vio_build_iommu_table: - gets the dma information from OF and
- * builds the TCE tree.
- * @dev: the virtual device.
- *
- * Returns a pointer to the built tce tree, or NULL if it can't
- * find property.
-*/
-static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
-{
- unsigned int *dma_window;
- struct iommu_table *newTceTable;
- unsigned long offset;
- int dma_window_property_size;
-
- dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size);
- if(!dma_window) {
- return NULL;
- }
-
- newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
-
- /* There should be some code to extract the phys-encoded offset
- using prom_n_addr_cells(). However, according to a comment
- on earlier versions, it's always zero, so we don't bother */
- offset = dma_window[1] >> PAGE_SHIFT;
-
- /* TCE table size - measured in tce entries */
- newTceTable->it_size = dma_window[4] >> PAGE_SHIFT;
- /* offset for VIO should always be 0 */
- newTceTable->it_offset = offset;
- newTceTable->it_busno = 0;
- newTceTable->it_index = (unsigned long)dma_window[0];
- newTceTable->it_type = TCE_VB;
-
- return iommu_init_table(newTceTable);
-}
-
-/**
- * vio_register_device_node: - Register a new vio device.
- * @of_node: The OF node for this device.
- *
- * Creates and initializes a vio_dev structure from the data in
- * of_node (dev.platform_data) and adds it to the list of virtual devices.
- * Returns a pointer to the created vio_dev or NULL if node has
- * NULL device_type or compatible fields.
- */
-struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
-{
- struct vio_dev *viodev;
- unsigned int *unit_address;
- unsigned int *irq_p;
-
- /* we need the 'device_type' property, in order to match with drivers */
- if ((NULL == of_node->type)) {
- printk(KERN_WARNING
- "%s: node %s missing 'device_type'\n", __FUNCTION__,
- of_node->name ? of_node->name : "<unknown>");
- return NULL;
- }
-
- unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
- if (!unit_address) {
- printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__,
- of_node->name ? of_node->name : "<unknown>");
- return NULL;
- }
-
- /* allocate a vio_dev for this node */
- viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
- if (!viodev) {
- return NULL;
- }
- memset(viodev, 0, sizeof(struct vio_dev));
-
- viodev->dev.platform_data = of_node_get(of_node);
-
- viodev->irq = NO_IRQ;
- irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
- if (irq_p) {
- int virq = virt_irq_create_mapping(*irq_p);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", of_node->full_name);
- } else
- viodev->irq = irq_offset_up(virq);
- }
-
- snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
- viodev->name = of_node->name;
- viodev->type = of_node->type;
- viodev->unit_address = *unit_address;
- viodev->iommu_table = vio_build_iommu_table(viodev);
-
- /* register with generic device framework */
- if (vio_register_device(viodev) == NULL) {
- /* XXX free TCE table */
- kfree(viodev);
- return NULL;
- }
- device_create_file(&viodev->dev, &dev_attr_devspec);
-
- return viodev;
-}
-EXPORT_SYMBOL(vio_register_device_node);
-
-/**
- * vio_get_attribute: - get attribute for virtual device
- * @vdev: The vio device to get property.
- * @which: The property/attribute to be extracted.
- * @length: Pointer to length of returned data size (unused if NULL).
- *
- * Calls prom.c's get_property() to return the value of the
- * attribute specified by the preprocessor constant @which
-*/
-const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length)
-{
- return get_property(vdev->dev.platform_data, (char*)which, length);
-}
-EXPORT_SYMBOL(vio_get_attribute);
-
-/* vio_find_name() - internal because only vio.c knows how we formatted the
- * kobject name
- * XXX once vio_bus_type.devices is actually used as a kset in
- * drivers/base/bus.c, this function should be removed in favor of
- * "device_find(kobj_name, &vio_bus_type)"
- */
-static struct vio_dev *vio_find_name(const char *kobj_name)
-{
- struct kobject *found;
-
- found = kset_find_obj(&devices_subsys.kset, kobj_name);
- if (!found)
- return NULL;
-
- return to_vio_dev(container_of(found, struct device, kobj));
-}
-
-/**
- * vio_find_node - find an already-registered vio_dev
- * @vnode: device_node of the virtual device we're looking for
- */
-struct vio_dev *vio_find_node(struct device_node *vnode)
-{
- uint32_t *unit_address;
- char kobj_name[BUS_ID_SIZE];
-
- /* construct the kobject name from the device node */
- unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
- if (!unit_address)
- return NULL;
- snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
-
- return vio_find_name(kobj_name);
-}
-EXPORT_SYMBOL(vio_find_node);
-
-int vio_enable_interrupts(struct vio_dev *dev)
-{
- int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
- if (rc != H_SUCCESS)
- printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
- return rc;
-}
-EXPORT_SYMBOL(vio_enable_interrupts);
-
-int vio_disable_interrupts(struct vio_dev *dev)
-{
- int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
- if (rc != H_SUCCESS)
- printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
- return rc;
-}
-EXPORT_SYMBOL(vio_disable_interrupts);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 2d60ea30fed6..b14f9b5c114e 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -522,7 +522,7 @@ nextnode:
np = of_find_node_by_type(NULL, "interrupt-controller");
if (!np) {
- printk(KERN_WARNING "xics: no ISA interrupt controller\n");
+ printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
xics_irq_8259_cascade_real = -1;
xics_irq_8259_cascade = -1;
} else {
@@ -641,23 +641,26 @@ void xics_teardown_cpu(int secondary)
ops->cppr_info(cpu, 0x00);
iosync();
+ /* Clear IPI */
+ ops->qirr_info(cpu, 0xff);
+
+ /*
+ * we need to EOI the IPI if we got here from kexec down IPI
+ *
+ * probably need to check all the other interrupts too
+ * should we be flagging idle loop instead?
+ * or creating some task to be scheduled?
+ */
+ ops->xirr_info_set(cpu, XICS_IPI);
+
/*
* Some machines need to have at least one cpu in the GIQ,
* so leave the master cpu in the group.
*/
- if (secondary) {
- /*
- * we need to EOI the IPI if we got here from kexec down IPI
- *
- * probably need to check all the other interrupts too
- * should we be flagging idle loop instead?
- * or creating some task to be scheduled?
- */
- ops->xirr_info_set(cpu, XICS_IPI);
+ if (secondary)
rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
(1UL << interrupt_server_size) - 1 -
default_distrib_server, 0);
- }
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 4c2b356774ea..cef95b023730 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -1,3 +1,7 @@
+ifeq ($(CONFIG_PPC64),y)
+EXTRA_CFLAGS += -mno-minimal-toc
+endif
+
obj-$(CONFIG_MPIC) += mpic.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_I8259) += i8259.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 38087bd6e3cf..6232091cc72b 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -246,7 +246,7 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_base = (unsigned long)dart_vbase;
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
- iommu_init_table(&iommu_table_dart);
+ iommu_init_table(&iommu_table_dart, -1);
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7dcdfcb3c984..bffe50d02c99 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -829,7 +829,27 @@ void __init mpic_init(struct mpic *mpic)
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
}
+void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
+{
+ u32 v;
+
+ v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
+ v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
+ v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
+ mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
+}
+void __init mpic_set_serial_int(struct mpic *mpic, int enable)
+{
+ u32 v;
+
+ v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
+ if (enable)
+ v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
+ else
+ v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
+ mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
+}
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
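
Both new MPIC helpers follow the usual read-modify-write pattern for a device register: read GLOBAL_CONF_1, clear or set the bits of interest, write the value back. A tiny standalone illustration of the same pattern against a fake register; the masks and field positions are invented for the example and are not the real MPIC layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout of a 32-bit config register, for illustration only. */
    #define CLK_RATIO_MASK  0x70000000u            /* 3-bit field */
    #define CLK_RATIO(x)    (((uint32_t)(x) << 28) & CLK_RATIO_MASK)
    #define SERIAL_INT_EN   0x08000000u            /* single enable bit */

    static uint32_t fake_reg = 0x00000042;         /* pretend hardware register */

    static uint32_t reg_read(void)        { return fake_reg; }
    static void     reg_write(uint32_t v) { fake_reg = v; }

    /* Same shape as mpic_set_clk_ratio(): clear the field, then set the new value. */
    static void set_clk_ratio(unsigned int ratio)
    {
            uint32_t v = reg_read();

            v &= ~CLK_RATIO_MASK;
            v |= CLK_RATIO(ratio);
            reg_write(v);
    }

    /* Same shape as mpic_set_serial_int(): set or clear one bit. */
    static void set_serial_int(int enable)
    {
            uint32_t v = reg_read();

            if (enable)
                    v |= SERIAL_INT_EN;
            else
                    v &= ~SERIAL_INT_EN;
            reg_write(v);
    }

    int main(void)
    {
            set_clk_ratio(5);
            set_serial_int(1);
            printf("register = 0x%08x\n", fake_reg);
            return 0;
    }
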
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index e9a8f5d1dfcd..b55de4f42aec 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -40,6 +40,10 @@ config GENERIC_NVRAM
bool
default y
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 386e000bcb73..c9bd184a295a 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -583,7 +583,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, address), address);
if (!pmd_none(*pmd))
- add_hash_page(mm->context, address, pmd_val(*pmd));
+ add_hash_page(mm->context.id, address, pmd_val(*pmd));
}
#endif
}
diff --git a/arch/ppc/mm/mmu_context.c b/arch/ppc/mm/mmu_context.c
index b4a4b3f02a1c..8784f3715032 100644
--- a/arch/ppc/mm/mmu_context.c
+++ b/arch/ppc/mm/mmu_context.c
@@ -30,7 +30,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
#ifdef FEW_CONTEXTS
atomic_t nr_free_contexts;
diff --git a/arch/ppc/mm/tlb.c b/arch/ppc/mm/tlb.c
index 6c3dc3c44c86..606b023196a2 100644
--- a/arch/ppc/mm/tlb.c
+++ b/arch/ppc/mm/tlb.c
@@ -42,7 +42,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
if (Hash != 0) {
ptephys = __pa(ptep) & PAGE_MASK;
- flush_hash_pages(mm->context, addr, ptephys, 1);
+ flush_hash_pages(mm->context.id, addr, ptephys, 1);
}
}
@@ -102,7 +102,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
pmd_t *pmd;
unsigned long pmd_end;
int count;
- unsigned int ctx = mm->context;
+ unsigned int ctx = mm->context.id;
if (Hash == 0) {
_tlbia();
@@ -166,7 +166,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
if (!pmd_none(*pmd))
- flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+ flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH;
}
diff --git a/arch/ppc/platforms/4xx/Kconfig b/arch/ppc/platforms/4xx/Kconfig
index 174ddbc9758b..293bd489e7d9 100644
--- a/arch/ppc/platforms/4xx/Kconfig
+++ b/arch/ppc/platforms/4xx/Kconfig
@@ -183,7 +183,7 @@ config IBM_EMAC4
config BIOS_FIXUP
bool
- depends on BUBINGA || EP405 || SYCAMORE || WALNUT
+ depends on BUBINGA || EP405 || SYCAMORE || WALNUT || CPCI405
default y
# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
diff --git a/arch/ppc/platforms/4xx/cpci405.c b/arch/ppc/platforms/4xx/cpci405.c
index 6571e39fbe48..970b69831e6f 100644
--- a/arch/ppc/platforms/4xx/cpci405.c
+++ b/arch/ppc/platforms/4xx/cpci405.c
@@ -1,10 +1,12 @@
/*
* Board setup routines for the esd CPCI-405 cPCI Board.
*
- * Author: Stefan Roese
- * stefan.roese@esd-electronics.com
+ * Copyright 2001-2006 esd electronic system design - hannover germany
*
- * Copyright 2001 esd electronic system design - hannover germany
+ * Authors: Matthias Fuchs
+ * matthias.fuchs@esd-electronics.com
+ * Stefan Roese
+ * stefan.roese@esd-electronics.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,9 +22,17 @@
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/todc.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <asm/ocp.h>
+#include <asm/ibm_ocp_pci.h>
+#include <platforms/4xx/ibm405gp.h>
+#ifdef CONFIG_GEN_RTC
void *cpci405_nvram;
+#endif
+
+extern bd_t __res;
/*
* Some IRQs unique to CPCI-405.
@@ -36,18 +46,69 @@ ppc405_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
* A B C D
*/
{
- {28, 28, 28, 28}, /* IDSEL 15 - cPCI slot 8 */
- {29, 29, 29, 29}, /* IDSEL 16 - cPCI slot 7 */
- {30, 30, 30, 30}, /* IDSEL 17 - cPCI slot 6 */
- {27, 27, 27, 27}, /* IDSEL 18 - cPCI slot 5 */
- {28, 28, 28, 28}, /* IDSEL 19 - cPCI slot 4 */
- {29, 29, 29, 29}, /* IDSEL 20 - cPCI slot 3 */
- {30, 30, 30, 30}, /* IDSEL 21 - cPCI slot 2 */
+ {28, 29, 30, 27}, /* IDSEL 15 - cPCI slot 8 */
+ {29, 30, 27, 28}, /* IDSEL 16 - cPCI slot 7 */
+ {30, 27, 28, 29}, /* IDSEL 17 - cPCI slot 6 */
+ {27, 28, 29, 30}, /* IDSEL 18 - cPCI slot 5 */
+ {28, 29, 30, 27}, /* IDSEL 19 - cPCI slot 4 */
+ {29, 30, 27, 28}, /* IDSEL 20 - cPCI slot 3 */
+ {30, 27, 28, 29}, /* IDSEL 21 - cPCI slot 2 */
};
const long min_idsel = 15, max_idsel = 21, irqs_per_slot = 4;
return PCI_IRQ_TABLE_LOOKUP;
};
+/* The serial clock for the chip is an internal clock determined by
+ * different clock speeds/dividers.
+ * Calculate the proper input baud rate and setup the serial driver.
+ */
+static void __init
+cpci405_early_serial_map(void)
+{
+ u32 uart_div;
+ int uart_clock;
+ struct uart_port port;
+
+ /* Calculate the serial clock input frequency
+ *
+ * The uart clock is the cpu frequency (provided in the board info
+ * structure) divided by the external UART Divisor.
+ */
+ uart_div = ((mfdcr(DCRN_CHCR_BASE) & CHR0_UDIV) >> 1) + 1;
+ uart_clock = __res.bi_procfreq / uart_div;
+
+ /* Setup serial port access */
+ memset(&port, 0, sizeof(port));
+#if defined(CONFIG_UART0_TTYS0)
+ port.membase = (void*)UART0_IO_BASE;
+ port.irq = UART0_INT;
+#else
+ port.membase = (void*)UART1_IO_BASE;
+ port.irq = UART1_INT;
+#endif
+ port.uartclk = uart_clock;
+ port.regshift = 0;
+ port.iotype = UPIO_MEM;
+ port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
+ port.line = 0;
+
+ if (early_serial_setup(&port) != 0) {
+ printk("Early serial init of port 0 failed\n");
+ }
+#if defined(CONFIG_UART0_TTYS0)
+ port.membase = (void*)UART1_IO_BASE;
+ port.irq = UART1_INT;
+#else
+ port.membase = (void*)UART0_IO_BASE;
+ port.irq = UART0_INT;
+#endif
+ port.line = 1;
+
+ if (early_serial_setup(&port) != 0) {
+ printk("Early serial init of port 1 failed\n");
+ }
+}
+
void __init
cpci405_setup_arch(void)
{
@@ -55,14 +116,68 @@ cpci405_setup_arch(void)
ibm_ocp_set_emac(0, 0);
- TODC_INIT(TODC_TYPE_MK48T35, cpci405_nvram, cpci405_nvram, cpci405_nvram, 8);
+ cpci405_early_serial_map();
+
+#ifdef CONFIG_GEN_RTC
+ TODC_INIT(TODC_TYPE_MK48T35,
+ cpci405_nvram, cpci405_nvram, cpci405_nvram, 8);
+#endif
+}
+
+void __init
+bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
+{
+ unsigned int bar_response, bar;
+
+ /* Disable region first */
+ out_le32((void *) &(pcip->pmm[0].ma), 0x00000000);
+ /* PLB starting addr, PCI: 0x80000000 */
+ out_le32((void *) &(pcip->pmm[0].la), 0x80000000);
+ /* PCI start addr, 0x80000000 */
+ out_le32((void *) &(pcip->pmm[0].pcila), PPC405_PCI_MEM_BASE);
+ /* 512MB range of PLB to PCI */
+ out_le32((void *) &(pcip->pmm[0].pciha), 0x00000000);
+ /* Enable no pre-fetch, enable region */
+ out_le32((void *) &(pcip->pmm[0].ma), ((0xffffffff -
+ (PPC405_PCI_UPPER_MEM -
+ PPC405_PCI_MEM_BASE)) | 0x01));
+
+ /* Disable region one */
+ out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
+ out_le32((void *) &(pcip->pmm[1].la), 0x00000000);
+ out_le32((void *) &(pcip->pmm[1].pcila), 0x00000000);
+ out_le32((void *) &(pcip->pmm[1].pciha), 0x00000000);
+ out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
+ out_le32((void *) &(pcip->ptm1ms), 0x00000001);
+
+ /* Disable region two */
+ out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
+ out_le32((void *) &(pcip->pmm[2].la), 0x00000000);
+ out_le32((void *) &(pcip->pmm[2].pcila), 0x00000000);
+ out_le32((void *) &(pcip->pmm[2].pciha), 0x00000000);
+ out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
+ out_le32((void *) &(pcip->ptm2ms), 0x00000000);
+ out_le32((void *) &(pcip->ptm2la), 0x00000000);
+
+ /* Zero config bars */
+ for (bar = PCI_BASE_ADDRESS_1; bar <= PCI_BASE_ADDRESS_2; bar += 4) {
+ early_write_config_dword(hose, hose->first_busno,
+ PCI_FUNC(hose->first_busno), bar,
+ 0x00000000);
+ early_read_config_dword(hose, hose->first_busno,
+ PCI_FUNC(hose->first_busno), bar,
+ &bar_response);
+ }
}
void __init
cpci405_map_io(void)
{
ppc4xx_map_io();
+
+#ifdef CONFIG_GEN_RTC
cpci405_nvram = ioremap(CPCI405_NVRAM_PADDR, CPCI405_NVRAM_SIZE);
+#endif
}
void __init
@@ -74,9 +189,11 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.setup_arch = cpci405_setup_arch;
ppc_md.setup_io_mappings = cpci405_map_io;
+#ifdef CONFIG_GEN_RTC
ppc_md.time_init = todc_time_init;
ppc_md.set_rtc_time = todc_set_rtc_time;
ppc_md.get_rtc_time = todc_get_rtc_time;
ppc_md.nvram_read_val = todc_direct_read_val;
ppc_md.nvram_write_val = todc_direct_write_val;
+#endif
}
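
The comment in cpci405_early_serial_map() boils down to a small calculation: the UART input clock is the CPU frequency divided by an external divisor decoded from the chip-control DCR as ((CHCR & CHR0_UDIV) >> 1) + 1. A standalone arithmetic sketch with made-up register contents; the CHR0_UDIV mask value below is an assumption, not taken from the 405GP headers:

    #include <stdio.h>

    #define CHR0_UDIV 0x3e                 /* assumed mask for the UART divisor bits */

    /* Mirrors the calculation in cpci405_early_serial_map():
     *   uart_div   = ((CHCR & CHR0_UDIV) >> 1) + 1
     *   uart_clock = cpu_frequency / uart_div
     */
    static unsigned int uart_clock(unsigned int chcr, unsigned int procfreq)
    {
            unsigned int uart_div = ((chcr & CHR0_UDIV) >> 1) + 1;

            return procfreq / uart_div;
    }

    int main(void)
    {
            /* e.g. a 266 MHz 405GP with the divisor bits reading back as 6 -> /4 */
            unsigned int chcr = 0x06, procfreq = 266000000;

            printf("uart clock = %u Hz\n", uart_clock(chcr, procfreq));
            return 0;
    }
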
diff --git a/arch/ppc/platforms/4xx/cpci405.h b/arch/ppc/platforms/4xx/cpci405.h
index e27f7cb650d8..f5a5c0cd062d 100644
--- a/arch/ppc/platforms/4xx/cpci405.h
+++ b/arch/ppc/platforms/4xx/cpci405.h
@@ -1,37 +1,29 @@
/*
* CPCI-405 board specific definitions
*
- * Copyright (c) 2001 Stefan Roese (stefan.roese@esd-electronics.com)
+ * Copyright 2001-2006 esd electronic system design - hannover germany
+ *
+ * Authors: Matthias Fuchs
+ * matthias.fuchs@esd-electronics.com
+ * Stefan Roese
+ * stefan.roese@esd-electronics.com
*/
#ifdef __KERNEL__
-#ifndef __ASM_CPCI405_H__
-#define __ASM_CPCI405_H__
+#ifndef __CPCI405_H__
+#define __CPCI405_H__
#include <linux/config.h>
-
-/* We have a 405GP core */
#include <platforms/4xx/ibm405gp.h>
-
#include <asm/ppcboot.h>
-#ifndef __ASSEMBLY__
-/* Some 4xx parts use a different timebase frequency from the internal clock.
-*/
-#define bi_tbfreq bi_intfreq
-
/* Map for the NVRAM space */
#define CPCI405_NVRAM_PADDR ((uint)0xf0200000)
#define CPCI405_NVRAM_SIZE ((uint)32*1024)
-#ifdef CONFIG_PPC405GP_INTERNAL_CLOCK
-#define BASE_BAUD 201600
-#else
-#define BASE_BAUD 691200
-#endif
+#define BASE_BAUD 0
-#define PPC4xx_MACHINE_NAME "esd CPCI-405"
+#define PPC4xx_MACHINE_NAME "esd CPCI-405"
-#endif /* !__ASSEMBLY__ */
-#endif /* __ASM_CPCI405_H__ */
+#endif /* __CPCI405_H__ */
#endif /* __KERNEL__ */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 01c5c082f970..821a141889de 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -446,6 +446,14 @@ config NO_IDLE_HZ_INIT
The HZ timer is switched off in idle by default. That means the
HZ timer is already disabled at boot time.
+config S390_HYPFS_FS
+ bool "s390 hypervisor file system support"
+ select SYS_HYPERVISOR
+ default y
+ help
+ This is a virtual file system intended to provide accounting
+ information in an s390 hypervisor environment.
+
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7bb16fb97d4f..b3791fb094a8 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -76,7 +76,7 @@ LDFLAGS_vmlinux := -e start
head-y := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o
core-y += arch/$(ARCH)/mm/ arch/$(ARCH)/kernel/ arch/$(ARCH)/crypto/ \
- arch/$(ARCH)/appldata/
+ arch/$(ARCH)/appldata/ arch/$(ARCH)/hypfs/
libs-y += arch/$(ARCH)/lib/
drivers-y += drivers/s390/
drivers-$(CONFIG_MATHEMU) += arch/$(ARCH)/math-emu/
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
new file mode 100644
index 000000000000..f4b00cd81f7c
--- /dev/null
+++ b/arch/s390/hypfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux hypfs filesystem routines.
+#
+
+obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
+
+s390_hypfs-objs := inode.o hypfs_diag.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
new file mode 100644
index 000000000000..ea5567be00fc
--- /dev/null
+++ b/arch/s390/hypfs/hypfs.h
@@ -0,0 +1,30 @@
+/*
+ * fs/hypfs/hypfs.h
+ * Hypervisor filesystem for Linux on s390.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#ifndef _HYPFS_H_
+#define _HYPFS_H_
+
+#include <linux/fs.h>
+#include <linux/types.h>
+
+#define REG_FILE_MODE 0440
+#define UPDATE_FILE_MODE 0220
+#define DIR_MODE 0550
+
+extern struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
+ const char *name);
+
+extern struct dentry *hypfs_create_u64(struct super_block *sb,
+ struct dentry *dir, const char *name,
+ __u64 value);
+
+extern struct dentry *hypfs_create_str(struct super_block *sb,
+ struct dentry *dir, const char *name,
+ char *string);
+
+#endif /* _HYPFS_H_ */
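
For orientation, the three helpers declared above are the interface hypfs_diag.c (next) uses to populate the filesystem: create a directory, then attach read-only u64 and string attributes under it. A hedged sketch of a caller follows; it is kernel-style code that will not build outside the kernel tree, the directory and attribute names are placeholders, and it assumes the helpers return ERR_PTR() on failure:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include "hypfs.h"

    /* Illustrative only: build <root>/SAMPLE with two attributes, using the
     * helpers declared above. Error handling is reduced to a bare minimum. */
    static int sample_populate(struct super_block *sb, struct dentry *root)
    {
            struct dentry *dir;

            dir = hypfs_mkdir(sb, root, "SAMPLE");
            if (IS_ERR(dir))
                    return PTR_ERR(dir);

            if (IS_ERR(hypfs_create_u64(sb, dir, "cpus", 4)))
                    return -ENOMEM;
            if (IS_ERR(hypfs_create_str(sb, dir, "name", "SAMPLE")))
                    return -ENOMEM;

            return 0;
    }
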
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
new file mode 100644
index 000000000000..efa74af7f04a
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -0,0 +1,696 @@
+/*
+ * fs/hypfs/hypfs_diag.c
+ * Hypervisor filesystem for Linux on s390. Diag 204 and 224
+ * implementation.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <asm/ebcdic.h>
+#include "hypfs.h"
+
+#define LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */
+#define CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
+#define TMP_SIZE 64 /* size of temporary buffers */
+
+/* diag 204 subcodes */
+enum diag204_sc {
+ SUBC_STIB4 = 4,
+ SUBC_RSI = 5,
+ SUBC_STIB6 = 6,
+ SUBC_STIB7 = 7
+};
+
+/* The two available diag 204 data formats */
+enum diag204_format {
+ INFO_SIMPLE = 0,
+ INFO_EXT = 0x00010000
+};
+
+/* bit is set in flags when physical cpu info is included in diag 204 data */
+#define LPAR_PHYS_FLG 0x80
+
+static char *diag224_cpu_names; /* diag 224 name table */
+static enum diag204_sc diag204_store_sc; /* used subcode for store */
+static enum diag204_format diag204_info_type; /* used diag 204 data format */
+
+static void *diag204_buf; /* 4K aligned buffer for diag204 data */
+static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */
+static int diag204_buf_pages; /* number of pages for diag204 data */
+
+/*
+ * DIAG 204 data structures and member access functions.
+ *
+ * Since we have two different diag 204 data formats for old and new s390
+ * machines, we do not access the structs directly, but use getter functions for
+ * each struct member instead. This should make the code more readable.
+ */
+
+/* Time information block */
+
+struct info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod;
+} __attribute__ ((packed));
+
+struct x_info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod1;
+ __u64 curtod2;
+ char reserved[40];
+} __attribute__ ((packed));
+
+static inline int info_blk_hdr__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct info_blk_hdr);
+ else /* INFO_EXT */
+ return sizeof(struct x_info_blk_hdr);
+}
+
+static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct info_blk_hdr *)hdr)->npar;
+ else /* INFO_EXT */
+ return ((struct x_info_blk_hdr *)hdr)->npar;
+}
+
+static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct info_blk_hdr *)hdr)->flags;
+ else /* INFO_EXT */
+ return ((struct x_info_blk_hdr *)hdr)->flags;
+}
+
+static inline __u16 info_blk_hdr__pcpus(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct info_blk_hdr *)hdr)->phys_cpus;
+ else /* INFO_EXT */
+ return ((struct x_info_blk_hdr *)hdr)->phys_cpus;
+}
+
+/* Partition header */
+
+struct part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ char reserved[6];
+ char part_name[LPAR_NAME_LEN];
+} __attribute__ ((packed));
+
+struct x_part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ __u8 rcpus;
+ __u8 pflag;
+ __u32 mlu;
+ char part_name[LPAR_NAME_LEN];
+ char lpc_name[8];
+ char os_name[8];
+ __u64 online_cs;
+ __u64 online_es;
+ __u8 upid;
+ char reserved1[3];
+ __u32 group_mlu;
+ char group_name[8];
+ char reserved2[32];
+} __attribute__ ((packed));
+
+static inline int part_hdr__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct part_hdr);
+ else /* INFO_EXT */
+ return sizeof(struct x_part_hdr);
+}
+
+static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct part_hdr *)hdr)->cpus;
+ else /* INFO_EXT */
+ return ((struct x_part_hdr *)hdr)->rcpus;
+}
+
+static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
+ char *name)
+{
+ if (type == INFO_SIMPLE)
+ memcpy(name, ((struct part_hdr *)hdr)->part_name,
+ LPAR_NAME_LEN);
+ else /* INFO_EXT */
+ memcpy(name, ((struct x_part_hdr *)hdr)->part_name,
+ LPAR_NAME_LEN);
+ EBCASC(name, LPAR_NAME_LEN);
+ name[LPAR_NAME_LEN] = 0;
+ strstrip(name);
+}
+
+struct cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+} __attribute__ ((packed));
+
+struct x_cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+ __u16 min_weight;
+ __u16 cur_weight;
+ __u16 max_weight;
+ char reseved2[2];
+ __u64 online_time;
+ __u64 wait_time;
+ __u32 pma_weight;
+ __u32 polar_weight;
+ char reserved3[40];
+} __attribute__ ((packed));
+
+/* CPU info block */
+
+static inline int cpu_info__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct cpu_info);
+ else /* INFO_EXT */
+ return sizeof(struct x_cpu_info);
+}
+
+static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->ctidx;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->ctidx;
+}
+
+static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->cpu_addr;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->cpu_addr;
+}
+
+static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->acc_time;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->acc_time;
+}
+
+static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->lp_time;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->lp_time;
+}
+
+static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return 0; /* online_time not available in simple info */
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->online_time;
+}
+
+/* Physical header */
+
+struct phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+} __attribute__ ((packed));
+
+struct x_phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+ char reserved3[80];
+} __attribute__ ((packed));
+
+static inline int phys_hdr__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct phys_hdr);
+ else /* INFO_EXT */
+ return sizeof(struct x_phys_hdr);
+}
+
+static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_hdr *)hdr)->cpus;
+ else /* INFO_EXT */
+ return ((struct x_phys_hdr *)hdr)->cpus;
+}
+
+/* Physical CPU info block */
+
+struct phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[3];
+ __u64 mgm_time;
+ char reserved3[8];
+} __attribute__ ((packed));
+
+struct x_phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[3];
+ __u64 mgm_time;
+ char reserved3[80];
+} __attribute__ ((packed));
+
+static inline int phys_cpu__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct phys_cpu);
+ else /* INFO_EXT */
+ return sizeof(struct x_phys_cpu);
+}
+
+static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_cpu *)hdr)->cpu_addr;
+ else /* INFO_EXT */
+ return ((struct x_phys_cpu *)hdr)->cpu_addr;
+}
+
+static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_cpu *)hdr)->mgm_time;
+ else /* INFO_EXT */
+ return ((struct x_phys_cpu *)hdr)->mgm_time;
+}
+
+static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_cpu *)hdr)->ctidx;
+ else /* INFO_EXT */
+ return ((struct x_phys_cpu *)hdr)->ctidx;
+}
+
+/* Diagnose 204 functions */
+
+static int diag204(unsigned long subcode, unsigned long size, void *addr)
+{
+ register unsigned long _subcode asm("0") = subcode;
+ register unsigned long _size asm("1") = size;
+
+ asm volatile (" diag %2,%0,0x204\n"
+ "0: \n" ".section __ex_table,\"a\"\n"
+#ifndef __s390x__
+ " .align 4\n"
+ " .long 0b,0b\n"
+#else
+ " .align 8\n"
+ " .quad 0b,0b\n"
+#endif
+ ".previous":"+d" (_subcode), "+d"(_size)
+ :"d"(addr)
+ :"memory");
+ if (_subcode)
+ return -1;
+ else
+ return _size;
+}
+
+/*
+ * For the old diag subcode 4 with simple data format we have to use real
+ * memory. If we use subcode 6 or 7 with extended data format, we can (and
+ * should) use vmalloc, since we need a lot of memory in that case. Currently
+ * up to 93 pages!
+ */
+
+static void diag204_free_buffer(void)
+{
+ if (!diag204_buf)
+ return;
+ if (diag204_buf_vmalloc) {
+ vfree(diag204_buf_vmalloc);
+ diag204_buf_vmalloc = NULL;
+ } else {
+ free_pages((unsigned long) diag204_buf, 0);
+ }
+ diag204_buf_pages = 0;
+ diag204_buf = NULL;
+}
+
+static void *diag204_alloc_vbuf(int pages)
+{
+ /* The buffer has to be page aligned! */
+ diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
+ if (!diag204_buf_vmalloc)
+ return ERR_PTR(-ENOMEM);
+ diag204_buf = (void*)((unsigned long)diag204_buf_vmalloc
+ & ~0xfffUL) + 0x1000;
+ diag204_buf_pages = pages;
+ return diag204_buf;
+}
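diag204_alloc_vbuf() needs a page-aligned buffer from vmalloc(), which gives no alignment guarantee, so it over-allocates by one page and rounds the pointer up to the next page boundary (always bumping, even when the pointer was already aligned, at the cost of at most one wasted page). A stand-alone user-space sketch of the same trick, using malloc() in place of vmalloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Allocate 'pages' page-aligned pages from an allocator without an
 * alignment guarantee: request one extra page, then round the pointer
 * up to the next page boundary (the "& ~0xfffUL) + 0x1000" step above). */
static void *alloc_page_aligned(size_t pages, void **raw)
{
        *raw = malloc(PAGE_SIZE * (pages + 1));
        if (!*raw)
                return NULL;
        return (void *)(((uintptr_t)*raw & ~(PAGE_SIZE - 1)) + PAGE_SIZE);
}

int main(void)
{
        void *raw;
        void *buf = alloc_page_aligned(4, &raw);

        if (!buf)
                return 1;
        printf("raw=%p aligned=%p\n", raw, buf);
        free(raw);      /* free the original pointer, not the aligned one */
        return 0;
}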
+
+static void *diag204_alloc_rbuf(void)
+{
+ diag204_buf = (void*)__get_free_pages(GFP_KERNEL,0);
+ if (!diag204_buf)
+ return ERR_PTR(-ENOMEM);
+ diag204_buf_pages = 1;
+ return diag204_buf;
+}
+
+static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
+{
+ if (diag204_buf) {
+ *pages = diag204_buf_pages;
+ return diag204_buf;
+ }
+ if (fmt == INFO_SIMPLE) {
+ *pages = 1;
+ return diag204_alloc_rbuf();
+ } else {/* INFO_EXT */
+ *pages = diag204(SUBC_RSI | INFO_EXT, 0, 0);
+ if (*pages <= 0)
+ return ERR_PTR(-ENOSYS);
+ else
+ return diag204_alloc_vbuf(*pages);
+ }
+}
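diag204_get_buffer() and the helpers around it use the kernel's error-pointer convention: a single return value is either a usable pointer or an encoded negative errno, distinguished with IS_ERR()/PTR_ERR(). The simplified user-space re-implementation below shows how the encoding works; MAX_ERRNO and get_buffer() are local definitions for the sketch, not taken from kernel headers.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Error-pointer helpers, re-implemented for user space: errno values
 * -1..-MAX_ERRNO map into the very top of the address space, so they
 * can never collide with a real allocation. */
static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long PTR_ERR(const void *p)  { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Illustrative allocator: returns a buffer or an encoded -ENOMEM. */
static void *get_buffer(size_t size)
{
        void *buf = malloc(size);

        return buf ? buf : ERR_PTR(-ENOMEM);
}

int main(void)
{
        void *buf = get_buffer(64);

        if (IS_ERR(buf)) {
                printf("allocation failed: %ld\n", PTR_ERR(buf));
                return 1;
        }
        free(buf);
        return 0;
}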
+
+/*
+ * diag204_probe() has to find out which type of diagnose 204 implementation
+ * we have on our machine. Currently there are three possible scenarios:
+ * - subcode 4 + simple data format (only one page)
+ * - subcodes 4-6 + extended data format
+ * - subcodes 4-7 + extended data format
+ *
+ * Subcode 5 is used to retrieve the size of the data provided by subcodes
+ * 6 and 7. Subcode 7 basically has the same function as subcode 6; in
+ * addition it also provides information about secondary CPUs.
+ * In order to get as much information as possible, we first try
+ * subcode 7, then 6, and if both fail, we use subcode 4.
+ */
+
+static int diag204_probe(void)
+{
+ void *buf;
+ int pages, rc;
+
+ buf = diag204_get_buffer(INFO_EXT, &pages);
+ if (!IS_ERR(buf)) {
+ if (diag204(SUBC_STIB7 | INFO_EXT, pages, buf) >= 0) {
+ diag204_store_sc = SUBC_STIB7;
+ diag204_info_type = INFO_EXT;
+ goto out;
+ }
+ if (diag204(SUBC_STIB6 | INFO_EXT, pages, buf) >= 0) {
+ diag204_store_sc = SUBC_STIB6;
+ diag204_info_type = INFO_EXT;
+ goto out;
+ }
+ diag204_free_buffer();
+ }
+
+ /* subcodes 6 and 7 failed, now try subcode 4 */
+
+ buf = diag204_get_buffer(INFO_SIMPLE, &pages);
+ if (IS_ERR(buf)) {
+ rc = PTR_ERR(buf);
+ goto fail_alloc;
+ }
+ if (diag204(SUBC_STIB4 | INFO_SIMPLE, pages, buf) >= 0) {
+ diag204_store_sc = SUBC_STIB4;
+ diag204_info_type = INFO_SIMPLE;
+ goto out;
+ } else {
+ rc = -ENOSYS;
+ goto fail_store;
+ }
+out:
+ rc = 0;
+fail_store:
+ diag204_free_buffer();
+fail_alloc:
+ return rc;
+}
+
+static void *diag204_store(void)
+{
+ void *buf;
+ int pages;
+
+ buf = diag204_get_buffer(diag204_info_type, &pages);
+ if (IS_ERR(buf))
+ goto out;
+ if (diag204(diag204_store_sc | diag204_info_type, pages, buf) < 0)
+ return ERR_PTR(-ENOSYS);
+out:
+ return buf;
+}
+
+/* Diagnose 224 functions */
+
+static void diag224(void *ptr)
+{
+ asm volatile(" diag %0,%1,0x224\n"
+ : :"d" (0), "d"(ptr) : "memory");
+}
+
+static int diag224_get_name_table(void)
+{
+ /* memory must be below 2GB */
+ diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!diag224_cpu_names)
+ return -ENOMEM;
+ diag224(diag224_cpu_names);
+ EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
+ return 0;
+}
+
+static void diag224_delete_name_table(void)
+{
+ kfree(diag224_cpu_names);
+}
+
+static int diag224_idx2name(int index, char *name)
+{
+ memcpy(name, diag224_cpu_names + ((index + 1) * CPU_NAME_LEN),
+ CPU_NAME_LEN);
+ name[CPU_NAME_LEN] = 0;
+ strstrip(name);
+ return 0;
+}
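diag224_idx2name() treats the name table as fixed 16-byte records, with the first record acting as a header, so entry 'index' lives at offset (index + 1) * CPU_NAME_LEN and trailing blanks are stripped. A user-space sketch of that lookup follows; the record contents are invented and the EBCDIC-to-ASCII conversion step is left out.

#include <stdio.h>
#include <string.h>

#define REC_LEN 16

static char table[3 * REC_LEN];         /* header record + two name records */

/* Fill one fixed-width record: blank-padded, no NUL terminator. */
static void put_rec(int rec, const char *s)
{
        memset(table + rec * REC_LEN, ' ', REC_LEN);
        memcpy(table + rec * REC_LEN, s, strlen(s));
}

/* Entry 'index' lives one record past the header, i.e. at (index + 1). */
static void idx2name(int index, char *name)
{
        memcpy(name, table + (index + 1) * REC_LEN, REC_LEN);
        name[REC_LEN] = 0;
        for (int i = REC_LEN - 1; i >= 0 && name[i] == ' '; i--)
                name[i] = 0;            /* strip trailing blanks */
}

int main(void)
{
        char name[REC_LEN + 1];

        put_rec(0, "\x02");             /* invented header record */
        put_rec(1, "IFL");
        put_rec(2, "CP");

        idx2name(0, name);
        printf("[%s]\n", name);         /* [IFL] */
        idx2name(1, name);
        printf("[%s]\n", name);         /* [CP] */
        return 0;
}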
+
+__init int hypfs_diag_init(void)
+{
+ int rc;
+
+ if (diag204_probe()) {
+ printk(KERN_ERR "hypfs: diag 204 not working.");
+ return -ENODATA;
+ }
+ rc = diag224_get_name_table();
+ if (rc) {
+ diag224_delete_name_table();
+ printk(KERN_ERR "hypfs: could not get name table.\n");
+ }
+ return rc;
+}
+
+__exit void hypfs_diag_exit(void)
+{
+ diag224_delete_name_table();
+ diag204_free_buffer();
+}
+
+/*
+ * Functions to create the directory structure
+ * *******************************************
+ */
+
+static int hypfs_create_cpu_files(struct super_block *sb,
+ struct dentry *cpus_dir, void *cpu_info)
+{
+ struct dentry *cpu_dir;
+ char buffer[TMP_SIZE];
+ void *rc;
+
+ snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
+ cpu_info));
+ cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
+ rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+ cpu_info__acc_time(diag204_info_type, cpu_info) -
+ cpu_info__lp_time(diag204_info_type, cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ rc = hypfs_create_u64(sb, cpu_dir, "cputime",
+ cpu_info__lp_time(diag204_info_type, cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ if (diag204_info_type == INFO_EXT) {
+ rc = hypfs_create_u64(sb, cpu_dir, "onlinetime",
+ cpu_info__online_time(diag204_info_type,
+ cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ }
+ diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
+ rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ return 0;
+}
+
+static void *hypfs_create_lpar_files(struct super_block *sb,
+ struct dentry *systems_dir, void *part_hdr)
+{
+ struct dentry *cpus_dir;
+ struct dentry *lpar_dir;
+ char lpar_name[LPAR_NAME_LEN + 1];
+ void *cpu_info;
+ int i;
+
+ part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
+ lpar_name[LPAR_NAME_LEN] = 0;
+ lpar_dir = hypfs_mkdir(sb, systems_dir, lpar_name);
+ if (IS_ERR(lpar_dir))
+ return lpar_dir;
+ cpus_dir = hypfs_mkdir(sb, lpar_dir, "cpus");
+ if (IS_ERR(cpus_dir))
+ return cpus_dir;
+ cpu_info = part_hdr + part_hdr__size(diag204_info_type);
+ for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) {
+ int rc;
+ rc = hypfs_create_cpu_files(sb, cpus_dir, cpu_info);
+ if (rc)
+ return ERR_PTR(rc);
+ cpu_info += cpu_info__size(diag204_info_type);
+ }
+ return cpu_info;
+}
+
+static int hypfs_create_phys_cpu_files(struct super_block *sb,
+ struct dentry *cpus_dir, void *cpu_info)
+{
+ struct dentry *cpu_dir;
+ char buffer[TMP_SIZE];
+ void *rc;
+
+ snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
+ cpu_info));
+ cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
+ rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+ phys_cpu__mgm_time(diag204_info_type, cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
+ rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ return 0;
+}
+
+static void *hypfs_create_phys_files(struct super_block *sb,
+ struct dentry *parent_dir, void *phys_hdr)
+{
+ int i;
+ void *cpu_info;
+ struct dentry *cpus_dir;
+
+ cpus_dir = hypfs_mkdir(sb, parent_dir, "cpus");
+ if (IS_ERR(cpus_dir))
+ return cpus_dir;
+ cpu_info = phys_hdr + phys_hdr__size(diag204_info_type);
+ for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) {
+ int rc;
+ rc = hypfs_create_phys_cpu_files(sb, cpus_dir, cpu_info);
+ if (rc)
+ return ERR_PTR(rc);
+ cpu_info += phys_cpu__size(diag204_info_type);
+ }
+ return cpu_info;
+}
+
+int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *systems_dir, *hyp_dir;
+ void *time_hdr, *part_hdr;
+ int i, rc;
+ void *buffer, *ptr;
+
+ buffer = diag204_store();
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ systems_dir = hypfs_mkdir(sb, root, "systems");
+ if (IS_ERR(systems_dir)) {
+ rc = PTR_ERR(systems_dir);
+ goto err_out;
+ }
+ time_hdr = (struct x_info_blk_hdr *)buffer;
+ part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type);
+ for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) {
+ part_hdr = hypfs_create_lpar_files(sb, systems_dir, part_hdr);
+ if (IS_ERR(part_hdr)) {
+ rc = PTR_ERR(part_hdr);
+ goto err_out;
+ }
+ }
+ if (info_blk_hdr__flags(diag204_info_type, time_hdr) & LPAR_PHYS_FLG) {
+ ptr = hypfs_create_phys_files(sb, root, part_hdr);
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ goto err_out;
+ }
+ }
+ hyp_dir = hypfs_mkdir(sb, root, "hyp");
+ if (IS_ERR(hyp_dir)) {
+ rc = PTR_ERR(hyp_dir);
+ goto err_out;
+ }
+ ptr = hypfs_create_str(sb, hyp_dir, "type", "LPAR Hypervisor");
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ goto err_out;
+ }
+ rc = 0;
+
+err_out:
+ return rc;
+}
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
new file mode 100644
index 000000000000..793dea6b9bb6
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag.h
@@ -0,0 +1,16 @@
+/*
+ * fs/hypfs/hypfs_diag.h
+ * Hypervisor filesystem for Linux on s390.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#ifndef _HYPFS_DIAG_H_
+#define _HYPFS_DIAG_H_
+
+extern int hypfs_diag_init(void);
+extern void hypfs_diag_exit(void);
+extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
+
+#endif /* _HYPFS_DIAG_H_ */
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
new file mode 100644
index 000000000000..18c091925ea5
--- /dev/null
+++ b/arch/s390/hypfs/inode.c
@@ -0,0 +1,491 @@
+/*
+ * fs/hypfs/inode.c
+ * Hypervisor filesystem for Linux on s390.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/vfs.h>
+#include <linux/pagemap.h>
+#include <linux/gfp.h>
+#include <linux/time.h>
+#include <linux/parser.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <asm/ebcdic.h>
+#include "hypfs.h"
+#include "hypfs_diag.h"
+
+#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
+#define TMP_SIZE 64 /* size of temporary buffers */
+
+static struct dentry *hypfs_create_update_file(struct super_block *sb,
+ struct dentry *dir);
+
+struct hypfs_sb_info {
+ uid_t uid; /* uid used for files and dirs */
+ gid_t gid; /* gid used for files and dirs */
+ struct dentry *update_file; /* file to trigger update */
+ time_t last_update; /* last update time in secs since 1970 */
+ struct mutex lock; /* lock to protect update process */
+};
+
+static struct file_operations hypfs_file_ops;
+static struct file_system_type hypfs_type;
+static struct super_operations hypfs_s_ops;
+
+/* start of list of all dentries, which have to be deleted on update */
+static struct dentry *hypfs_last_dentry;
+
+static void hypfs_update_update(struct super_block *sb)
+{
+ struct hypfs_sb_info *sb_info = sb->s_fs_info;
+ struct inode *inode = sb_info->update_file->d_inode;
+
+ sb_info->last_update = get_seconds();
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+}
+
+/* directory tree removal functions */
+
+static void hypfs_add_dentry(struct dentry *dentry)
+{
+ dentry->d_fsdata = hypfs_last_dentry;
+ hypfs_last_dentry = dentry;
+}
+
+static void hypfs_remove(struct dentry *dentry)
+{
+ struct dentry *parent;
+
+ parent = dentry->d_parent;
+ if (S_ISDIR(dentry->d_inode->i_mode))
+ simple_rmdir(parent->d_inode, dentry);
+ else
+ simple_unlink(parent->d_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+}
+
+static void hypfs_delete_tree(struct dentry *root)
+{
+ while (hypfs_last_dentry) {
+ struct dentry *next_dentry;
+ next_dentry = hypfs_last_dentry->d_fsdata;
+ hypfs_remove(hypfs_last_dentry);
+ hypfs_last_dentry = next_dentry;
+ }
+}
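hypfs keeps every dentry that must go away on the next update on a simple LIFO list, threaded through the otherwise unused d_fsdata pointer, so tearing the tree down is just walking that list. Below is a user-space sketch of the same intrusive delete list, with a made-up node type standing in for the dentry.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a dentry: the spare 'fsdata' pointer threads the LIFO
 * delete list, like d_fsdata threads hypfs_last_dentry above. */
struct node {
        char name[16];
        void *fsdata;                   /* next node on the delete list */
};

static struct node *last_node;          /* list head, newest entry first */

static void add_node(struct node *n)
{
        n->fsdata = last_node;
        last_node = n;
}

static void delete_all(void)
{
        while (last_node) {
                struct node *next = last_node->fsdata;

                printf("removing %s\n", last_node->name);
                free(last_node);
                last_node = next;
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                snprintf(n->name, sizeof(n->name), "entry%d", i);
                add_node(n);
        }
        delete_all();                   /* entry2, entry1, entry0 */
        return 0;
}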
+
+static struct inode *hypfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret = new_inode(sb);
+
+ if (ret) {
+ struct hypfs_sb_info *hypfs_info = sb->s_fs_info;
+ ret->i_mode = mode;
+ ret->i_uid = hypfs_info->uid;
+ ret->i_gid = hypfs_info->gid;
+ ret->i_blksize = PAGE_CACHE_SIZE;
+ ret->i_blocks = 0;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+ if (mode & S_IFDIR)
+ ret->i_nlink = 2;
+ else
+ ret->i_nlink = 1;
+ }
+ return ret;
+}
+
+static void hypfs_drop_inode(struct inode *inode)
+{
+ kfree(inode->u.generic_ip);
+ generic_delete_inode(inode);
+}
+
+static int hypfs_open(struct inode *inode, struct file *filp)
+{
+ char *data = filp->f_dentry->d_inode->u.generic_ip;
+ struct hypfs_sb_info *fs_info;
+
+ if (filp->f_mode & FMODE_WRITE) {
+ if (!(inode->i_mode & S_IWUGO))
+ return -EACCES;
+ }
+ if (filp->f_mode & FMODE_READ) {
+ if (!(inode->i_mode & S_IRUGO))
+ return -EACCES;
+ }
+
+ fs_info = inode->i_sb->s_fs_info;
+ if(data) {
+ mutex_lock(&fs_info->lock);
+ filp->private_data = kstrdup(data, GFP_KERNEL);
+ if (!filp->private_data) {
+ mutex_unlock(&fs_info->lock);
+ return -ENOMEM;
+ }
+ mutex_unlock(&fs_info->lock);
+ }
+ return 0;
+}
+
+static ssize_t hypfs_aio_read(struct kiocb *iocb, __user char *buf,
+ size_t count, loff_t offset)
+{
+ char *data;
+ size_t len;
+ struct file *filp = iocb->ki_filp;
+
+ data = filp->private_data;
+ len = strlen(data);
+ if (offset > len) {
+ count = 0;
+ goto out;
+ }
+ if (count > len - offset)
+ count = len - offset;
+ if (copy_to_user(buf, data + offset, count)) {
+ count = -EFAULT;
+ goto out;
+ }
+ iocb->ki_pos += count;
+ file_accessed(filp);
+out:
+ return count;
+}
+static ssize_t hypfs_aio_write(struct kiocb *iocb, const char __user *buf,
+ size_t count, loff_t pos)
+{
+ int rc;
+ struct super_block *sb;
+ struct hypfs_sb_info *fs_info;
+
+ sb = iocb->ki_filp->f_dentry->d_inode->i_sb;
+ fs_info = sb->s_fs_info;
+ /*
+ * Currently we only allow one update per second, for two reasons:
+ * 1. diag 204 is VERY expensive
+ * 2. If several processes do updates in parallel and then read the
+ * hypfs data, restricting the minimum update interval reduces the
+ * likelihood of collisions. A collision occurs if, during the data
+ * gathering of one process, another process triggers an update.
+ * If the first process wants to ensure consistent data, it has to
+ * restart data collection in this case.
+ */
+ mutex_lock(&fs_info->lock);
+ if (fs_info->last_update == get_seconds()) {
+ rc = -EBUSY;
+ goto out;
+ }
+ hypfs_delete_tree(sb->s_root);
+ rc = hypfs_diag_create_files(sb, sb->s_root);
+ if (rc) {
+ printk(KERN_ERR "hypfs: Update failed\n");
+ hypfs_delete_tree(sb->s_root);
+ goto out;
+ }
+ hypfs_update_update(sb);
+ rc = count;
+out:
+ mutex_unlock(&fs_info->lock);
+ return rc;
+}
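The once-per-second gate in hypfs_aio_write() boils down to comparing the current second with the time of the last successful update and refusing to run again within the same second. A single-threaded user-space sketch of that gate (no locking, unlike the mutex-protected kernel code):

#include <stdio.h>
#include <time.h>

static time_t last_update;              /* time of the last successful update */

/* Return 0 if an update may run now, -1 ("EBUSY") if one already ran
 * during the current second. */
static int try_update(void)
{
        time_t now = time(NULL);

        if (now == last_update)
                return -1;
        /* ...expensive data gathering would happen here... */
        last_update = now;
        return 0;
}

int main(void)
{
        printf("%d\n", try_update());   /* 0: allowed */
        printf("%d\n", try_update());   /* -1: same second, throttled */
        return 0;
}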
+
+static int hypfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+enum { opt_uid, opt_gid, opt_err };
+
+static match_table_t hypfs_tokens = {
+ {opt_uid, "uid=%u"},
+ {opt_gid, "gid=%u"},
+ {opt_err, NULL}
+};
+
+static int hypfs_parse_options(char *options, struct super_block *sb)
+{
+ char *str;
+ substring_t args[MAX_OPT_ARGS];
+
+ if (!options)
+ return 0;
+ while ((str = strsep(&options, ",")) != NULL) {
+ int token, option;
+ struct hypfs_sb_info *hypfs_info = sb->s_fs_info;
+
+ if (!*str)
+ continue;
+ token = match_token(str, hypfs_tokens, args);
+ switch (token) {
+ case opt_uid:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ hypfs_info->uid = option;
+ break;
+ case opt_gid:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ hypfs_info->gid = option;
+ break;
+ case opt_err:
+ default:
+ printk(KERN_ERR "hypfs: Unrecognized mount option "
+ "\"%s\" or missing value\n", str);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
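hypfs_parse_options() splits the mount option string on commas with strsep() and matches each token against a small table. The user-space sketch below does the same splitting but replaces the kernel's match_token()/match_int() helpers with strncmp() and strtoul(); only the option names mirror the ones above.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_options(char *options, unsigned int *uid, unsigned int *gid)
{
        char *str;

        while ((str = strsep(&options, ",")) != NULL) {
                if (!*str)
                        continue;
                if (!strncmp(str, "uid=", 4))
                        *uid = strtoul(str + 4, NULL, 10);
                else if (!strncmp(str, "gid=", 4))
                        *gid = strtoul(str + 4, NULL, 10);
                else {
                        fprintf(stderr, "unrecognized option \"%s\"\n", str);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        char opts[] = "uid=1000,gid=100";       /* strsep() modifies the string */
        unsigned int uid = 0, gid = 0;

        if (parse_options(opts, &uid, &gid))
                return 1;
        printf("uid=%u gid=%u\n", uid, gid);
        return 0;
}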
+
+static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root_inode;
+ struct dentry *root_dentry;
+ int rc = 0;
+ struct hypfs_sb_info *sbi;
+
+ sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+ mutex_init(&sbi->lock);
+ sbi->uid = current->uid;
+ sbi->gid = current->gid;
+ sb->s_fs_info = sbi;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = HYPFS_MAGIC;
+ sb->s_op = &hypfs_s_ops;
+ if (hypfs_parse_options(data, sb)) {
+ rc = -EINVAL;
+ goto err_alloc;
+ }
+ root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
+ if (!root_inode) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+ root_inode->i_op = &simple_dir_inode_operations;
+ root_inode->i_fop = &simple_dir_operations;
+ root_dentry = d_alloc_root(root_inode);
+ if (!root_dentry) {
+ iput(root_inode);
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+ rc = hypfs_diag_create_files(sb, root_dentry);
+ if (rc)
+ goto err_tree;
+ sbi->update_file = hypfs_create_update_file(sb, root_dentry);
+ if (IS_ERR(sbi->update_file)) {
+ rc = PTR_ERR(sbi->update_file);
+ goto err_tree;
+ }
+ hypfs_update_update(sb);
+ sb->s_root = root_dentry;
+ return 0;
+
+err_tree:
+ hypfs_delete_tree(root_dentry);
+ d_genocide(root_dentry);
+ dput(root_dentry);
+err_alloc:
+ kfree(sbi);
+ return rc;
+}
+
+static int hypfs_get_super(struct file_system_type *fst, int flags,
+ const char *devname, void *data, struct vfsmount *mnt)
+{
+ return get_sb_single(fst, flags, data, hypfs_fill_super, mnt);
+}
+
+static void hypfs_kill_super(struct super_block *sb)
+{
+ struct hypfs_sb_info *sb_info = sb->s_fs_info;
+
+ hypfs_delete_tree(sb->s_root);
+ hypfs_remove(sb_info->update_file);
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+ kill_litter_super(sb);
+}
+
+static struct dentry *hypfs_create_file(struct super_block *sb,
+ struct dentry *parent, const char *name,
+ char *data, mode_t mode)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+ struct qstr qname;
+
+ qname.name = name;
+ qname.len = strlen(name);
+ qname.hash = full_name_hash(name, qname.len);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ return ERR_PTR(-ENOMEM);
+ inode = hypfs_make_inode(sb, mode);
+ if (!inode) {
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (mode & S_IFREG) {
+ inode->i_fop = &hypfs_file_ops;
+ if (data)
+ inode->i_size = strlen(data);
+ else
+ inode->i_size = 0;
+ } else if (mode & S_IFDIR) {
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ parent->d_inode->i_nlink++;
+ } else
+ BUG();
+ inode->u.generic_ip = data;
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ return dentry;
+}
+
+struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = hypfs_create_file(sb, parent, name, NULL, S_IFDIR | DIR_MODE);
+ if (IS_ERR(dentry))
+ return dentry;
+ hypfs_add_dentry(dentry);
+ parent->d_inode->i_nlink++;
+ return dentry;
+}
+
+static struct dentry *hypfs_create_update_file(struct super_block *sb,
+ struct dentry *dir)
+{
+ struct dentry *dentry;
+
+ dentry = hypfs_create_file(sb, dir, "update", NULL,
+ S_IFREG | UPDATE_FILE_MODE);
+ /*
+ * We do not put the update file on the 'delete' list with
+ * hypfs_add_dentry(), since it should not be removed when the tree
+ * is updated.
+ */
+ return dentry;
+}
+
+struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
+ const char *name, __u64 value)
+{
+ char *buffer;
+ char tmp[TMP_SIZE];
+ struct dentry *dentry;
+
+ snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value);
+ buffer = kstrdup(tmp, GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+ dentry =
+ hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+ if (IS_ERR(dentry)) {
+ kfree(buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ hypfs_add_dentry(dentry);
+ return dentry;
+}
+
+struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir,
+ const char *name, char *string)
+{
+ char *buffer;
+ struct dentry *dentry;
+
+ buffer = kmalloc(strlen(string) + 2, GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+ sprintf(buffer, "%s\n", string);
+ dentry =
+ hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+ if (IS_ERR(dentry)) {
+ kfree(buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ hypfs_add_dentry(dentry);
+ return dentry;
+}
+
+static struct file_operations hypfs_file_ops = {
+ .open = hypfs_open,
+ .release = hypfs_release,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = hypfs_aio_read,
+ .aio_write = hypfs_aio_write,
+};
+
+static struct file_system_type hypfs_type = {
+ .owner = THIS_MODULE,
+ .name = "s390_hypfs",
+ .get_sb = hypfs_get_super,
+ .kill_sb = hypfs_kill_super
+};
+
+static struct super_operations hypfs_s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = hypfs_drop_inode,
+};
+
+static decl_subsys(s390, NULL, NULL);
+
+static int __init hypfs_init(void)
+{
+ int rc;
+
+ if (MACHINE_IS_VM)
+ return -ENODATA;
+ if (hypfs_diag_init()) {
+ rc = -ENODATA;
+ goto fail_diag;
+ }
+ kset_set_kset_s(&s390_subsys, hypervisor_subsys);
+ rc = subsystem_register(&s390_subsys);
+ if (rc)
+ goto fail_sysfs;
+ rc = register_filesystem(&hypfs_type);
+ if (rc)
+ goto fail_filesystem;
+ return 0;
+
+fail_filesystem:
+ subsystem_unregister(&s390_subsys);
+fail_sysfs:
+ hypfs_diag_exit();
+fail_diag:
+ printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
+ return rc;
+}
+
+static void __exit hypfs_exit(void)
+{
+ hypfs_diag_exit();
+ unregister_filesystem(&hypfs_type);
+ subsystem_unregister(&s390_subsys);
+}
+
+module_init(hypfs_init)
+module_exit(hypfs_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
+MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index 3ea8929e483b..9e2ffc45c0e0 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -407,7 +407,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -8ul);
diff --git a/arch/sparc64/solaris/fs.c b/arch/sparc64/solaris/fs.c
index 4885ca6cbc77..0f0eb6aa1c40 100644
--- a/arch/sparc64/solaris/fs.c
+++ b/arch/sparc64/solaris/fs.c
@@ -356,7 +356,7 @@ static int report_statvfs(struct vfsmount *mnt, struct inode *inode, u32 buf)
int error;
struct sol_statvfs __user *ss = A(buf);
- error = vfs_statfs(mnt->mnt_sb, &s);
+ error = vfs_statfs(mnt->mnt_root, &s);
if (!error) {
const char *p = mnt->mnt_sb->s_type->name;
int i = 0;
@@ -392,7 +392,7 @@ static int report_statvfs64(struct vfsmount *mnt, struct inode *inode, u32 buf)
int error;
struct sol_statvfs64 __user *ss = A(buf);
- error = vfs_statfs(mnt->mnt_sb, &s);
+ error = vfs_statfs(mnt->mnt_root, &s);
if (!error) {
const char *p = mnt->mnt_sb->s_type->name;
int i = 0;
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 5681a8bd370b..bab51d619173 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -49,7 +49,6 @@ config GCOV
config SYSCALL_DEBUG
bool "Enable system call debugging"
- default N
depends on DEBUG_INFO
help
This adds some system debugging to UML, including keeping a ring buffer
diff --git a/arch/um/sys-ppc/misc.S b/arch/um/sys-ppc/misc.S
index 11b7bd768cfd..f0c971db47e4 100644
--- a/arch/um/sys-ppc/misc.S
+++ b/arch/um/sys-ppc/misc.S
@@ -23,14 +23,10 @@
#define CACHE_LINE_SIZE 16
#define LG_CACHE_LINE_SIZE 4
#define MAX_COPY_PREFETCH 1
-#elif !defined(CONFIG_PPC64BRIDGE)
+#else
#define CACHE_LINE_SIZE 32
#define LG_CACHE_LINE_SIZE 5
#define MAX_COPY_PREFETCH 4
-#else
-#define CACHE_LINE_SIZE 128
-#define LG_CACHE_LINE_SIZE 7
-#define MAX_COPY_PREFETCH 1
#endif /* CONFIG_4xx || CONFIG_8xx */
.text
diff --git a/arch/v850/kernel/signal.c b/arch/v850/kernel/signal.c
index 633e4e1b825f..17c2d4359b04 100644
--- a/arch/v850/kernel/signal.c
+++ b/arch/v850/kernel/signal.c
@@ -274,7 +274,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* Default to using normal stack */
unsigned long sp = regs->gpr[GPR_SP];
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void *)((sp - frame_size) & -8UL);
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 7d3bc5ac5db0..af44130f0d65 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -299,6 +299,7 @@ config X86_64_ACPI_NUMA
bool "ACPI NUMA detection"
depends on NUMA
select ACPI
+ select PCI
select ACPI_NUMA
default y
help
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 5a92fed2d1d5..4ec594ab1a98 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -696,4 +696,5 @@ ia32_sys_call_table:
.quad sys_sync_file_range
.quad sys_tee
.quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
ia32_syscall_end:
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 4fe97071f297..080b9963f1bc 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
+processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
endif
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
deleted file mode 100644
index 3bdc2baa5bb1..000000000000
--- a/arch/x86_64/kernel/acpi/processor.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * arch/x86_64/kernel/acpi/processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return;
- }
-
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
- pr->pdc = obj_list;
-
- return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
- unsigned int cpu = pr->id;
- struct cpuinfo_x86 *c = cpu_data + cpu;
-
- pr->pdc = NULL;
- if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
- init_intel_pdc(pr, c);
-
- return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 867a0ebee177..091bc79c888f 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -35,6 +35,8 @@
#include <linux/pci.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
+#include <linux/cpumask.h>
+
#include <asm/mpspec.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -66,7 +68,8 @@ static void init_low_mapping(void)
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- flush_tlb_all();
+ WARN_ON(num_online_cpus() != 1);
+ local_flush_tlb();
}
/**
@@ -92,7 +95,7 @@ int acpi_save_state_mem(void)
void acpi_restore_state_mem(void)
{
set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
- flush_tlb_all();
+ local_flush_tlb();
}
/**
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 100a30c40044..29ef99001e05 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -51,7 +51,7 @@ int disable_apic_timer __initdata;
static cpumask_t timer_interrupt_broadcast_ipi_mask;
/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer = 0;
+int using_apic_timer __read_mostly = 0;
static void apic_pm_activate(void);
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index a5d7e16b928e..44ddb1ec808d 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -24,7 +24,7 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-unsigned int mxcsr_feature_mask = 0xffffffff;
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffff;
void mxcsr_feature_mask_init(void)
{
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 655b9192eeb3..b8d5116d7371 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -47,6 +47,7 @@
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
+#include <linux/suspend.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -595,6 +596,100 @@ static void discover_ebda(void)
ebda_size = 64*1024;
}
+#ifdef CONFIG_SOFTWARE_SUSPEND
+static void __init mark_nosave_page_range(unsigned long start, unsigned long end)
+{
+ struct page *page;
+ while (start <= end) {
+ page = pfn_to_page(start);
+ SetPageNosave(page);
+ start++;
+ }
+}
+
+static void __init e820_nosave_reserved_pages(void)
+{
+ int i;
+ unsigned long r_start = 0, r_end = 0;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = round_down(ei->addr, PAGE_SIZE);
+ end = round_up(ei->addr + ei->size, PAGE_SIZE);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RESERVED)
+ continue;
+ r_end = start>>PAGE_SHIFT;
+ /* swsusp ignores invalid pfn, ignore these pages here */
+ if (r_end > end_pfn)
+ r_end = end_pfn;
+ if (r_end > r_start)
+ mark_nosave_page_range(r_start, r_end-1);
+ if (r_end >= end_pfn)
+ break;
+ r_start = end>>PAGE_SHIFT;
+ }
+}
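e820_nosave_reserved_pages() walks the sorted e820 map and marks everything between the end of one non-reserved region and the start of the next as nosave, clamped to end_pfn. The same gap-walking logic in a stand-alone user-space sketch, with an invented region map:

#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; };

int main(void)
{
        /* invented, sorted "usable" regions, in page frame numbers */
        struct region map[] = { { 16, 160 }, { 256, 1024 }, { 1100, 2048 } };
        unsigned long r_start = 0;
        unsigned long end_pfn = 2048;   /* highest page frame of interest */

        for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                unsigned long r_end = map[i].start_pfn;

                if (r_end > end_pfn)
                        r_end = end_pfn;
                if (r_end > r_start)    /* gap before this region: nosave */
                        printf("nosave: pfn %lu-%lu\n", r_start, r_end - 1);
                if (r_end >= end_pfn)
                        break;
                r_start = map[i].end_pfn;
        }
        return 0;
}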
+
+static void __init e820_save_acpi_pages(void)
+{
+ int i;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = ei->addr;
+ end = ei->addr + ei->size;
+ if (start >= end)
+ continue;
+ if (ei->type != E820_ACPI && ei->type != E820_NVS)
+ continue;
+ /*
+ * If the region is below end_pfn, it will be
+ * saved/restored by swsusp following the 'RAM' type.
+ */
+ if (start < (end_pfn << PAGE_SHIFT))
+ start = end_pfn << PAGE_SHIFT;
+ if (end > start)
+ swsusp_add_arch_pages(start, end);
+ }
+}
+
+extern char __start_rodata, __end_rodata;
+/*
+ * BIOS reserved region/hole - no save/restore
+ * ACPI NVS - save/restore
+ * ACPI Data - this is a little tricky: the memory could be used by the OS
+ * after the OS has read the tables from the region, but saving/restoring it
+ * has no side effect anyway, and Linux runtime module load/unload might use it.
+ * kernel rodata - no save/restore (kernel rodata isn't changed)
+ */
+static int __init mark_nosave_pages(void)
+{
+ unsigned long pfn_start, pfn_end;
+
+ /* BIOS reserved regions & holes */
+ e820_nosave_reserved_pages();
+
+ /* kernel rodata */
+ pfn_start = round_up(__pa_symbol(&__start_rodata), PAGE_SIZE) >> PAGE_SHIFT;
+ pfn_end = round_down(__pa_symbol(&__end_rodata), PAGE_SIZE) >> PAGE_SHIFT;
+ mark_nosave_page_range(pfn_start, pfn_end-1);
+
+ /* record ACPI Data/NVS as saveable */
+ e820_save_acpi_pages();
+
+ return 0;
+}
+core_initcall(mark_nosave_pages);
+#endif
+
void __init setup_arch(char **cmdline_p)
{
unsigned long kernel_end;
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 474df22c6ed2..502fce65e96a 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -30,7 +30,6 @@
static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
-static nodemask_t nodes_found __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
@@ -38,33 +37,14 @@ int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0 /* Ignore all settings */
#endif
-static u8 pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)
-static int node_to_pxm(int n);
-
-int pxm_to_node(int pxm)
-{
- if ((unsigned)pxm >= 256)
- return -1;
- /* Extend 0xff to (int)-1 */
- return (signed char)pxm2node[pxm];
-}
-
static __init int setup_node(int pxm)
{
- unsigned node = pxm2node[pxm];
- if (node == 0xff) {
- if (nodes_weight(nodes_found) >= MAX_NUMNODES)
- return -1;
- node = first_unset_node(nodes_found);
- node_set(node, nodes_found);
- pxm2node[pxm] = node;
- }
- return pxm2node[pxm];
+ return acpi_map_pxm_to_node(pxm);
}
static __init int conflicting_nodes(unsigned long start, unsigned long end)
@@ -440,17 +420,6 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
return 0;
}
-static int node_to_pxm(int n)
-{
- int i;
- if (pxm2node[n] == n)
- return n;
- for (i = 0; i < 256; i++)
- if (pxm2node[i] == n)
- return i;
- return 0;
-}
-
void __init srat_reserve_add_area(int nodeid)
{
if (found_add_area && nodes_add[nodeid].end) {
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index dbeb3504c3c8..848f173db257 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HARDIRQS
bool
default y
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y
-
source "init/Kconfig"
menu "Processor type and features"
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 5c018c503dfa..89e409e9e0de 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1102,7 +1102,7 @@ ENTRY(fast_syscall_sysxtensa)
s32i a7, a2, PT_AREG7
movi a7, 4 # sizeof(unsigned int)
- verify_area a3, a7, a0, a2, .Leac
+ access_ok a0, a3, a7, a2, .Leac
_beqi a6, SYSXTENSA_ATOMIC_SET, .Lset
_beqi a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index e252b61e45a5..c494f0826fc5 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -104,7 +104,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
if (act) {
old_sigset_t mask;
- if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT;
@@ -116,7 +116,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
- if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT;
@@ -236,7 +236,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
err |= __get_user(buf, &sc->sc_cpstate);
if (buf) {
- if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+ if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
goto badframe;
err |= restore_cpextra(buf);
}
@@ -357,7 +357,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
if (regs->depc > 64)
panic ("Double exception sys_sigreturn\n");
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
@@ -394,7 +394,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
return 0;
}
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -433,7 +433,7 @@ badframe:
static inline void *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void *)((sp - frame_size) & -16ul);
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index f3b7753aac99..48d090e266fc 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -40,7 +40,7 @@ config IOSCHED_CFQ
choice
prompt "Default I/O scheduler"
- default DEFAULT_AS
+ default DEFAULT_CFQ
help
Select the I/O scheduler which will be used by default for all
block devices.
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 0c750393be4a..1ec5df466708 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -96,7 +96,7 @@ struct as_data {
struct as_rq *next_arq[2]; /* next in sort order */
sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
- struct list_head *hash; /* request hash */
+ struct hlist_head *hash; /* request hash */
unsigned long exit_prob; /* probability a task will exit while
being waited on */
@@ -165,8 +165,7 @@ struct as_rq {
/*
* request hash, key is the ending offset (for back merge lookup)
*/
- struct list_head hash;
- unsigned int on_hash;
+ struct hlist_node hash;
/*
* expire fifo
@@ -282,17 +281,15 @@ static const int as_hash_shift = 6;
#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
#define AS_HASH_ENTRIES (1 << as_hash_shift)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash)
static inline void __as_del_arq_hash(struct as_rq *arq)
{
- arq->on_hash = 0;
- list_del_init(&arq->hash);
+ hlist_del_init(&arq->hash);
}
static inline void as_del_arq_hash(struct as_rq *arq)
{
- if (arq->on_hash)
+ if (!hlist_unhashed(&arq->hash))
__as_del_arq_hash(arq);
}
@@ -300,10 +297,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
{
struct request *rq = arq->request;
- BUG_ON(arq->on_hash);
+ BUG_ON(!hlist_unhashed(&arq->hash));
- arq->on_hash = 1;
- list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
+ hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
}
/*
@@ -312,31 +308,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
{
struct request *rq = arq->request;
- struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+ struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
- if (!arq->on_hash) {
+ if (hlist_unhashed(&arq->hash)) {
WARN_ON(1);
return;
}
- if (arq->hash.prev != head) {
- list_del(&arq->hash);
- list_add(&arq->hash, head);
+ if (&arq->hash != head->first) {
+ hlist_del(&arq->hash);
+ hlist_add_head(&arq->hash, head);
}
}
static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
{
- struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
- struct list_head *entry, *next = hash_list->next;
+ struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
+ struct hlist_node *entry, *next;
+ struct as_rq *arq;
- while ((entry = next) != hash_list) {
- struct as_rq *arq = list_entry_hash(entry);
+ hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
struct request *__rq = arq->request;
- next = entry->next;
-
- BUG_ON(!arq->on_hash);
+ BUG_ON(hlist_unhashed(&arq->hash));
if (!rq_mergeable(__rq)) {
as_del_arq_hash(arq);
@@ -353,9 +347,6 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
/*
* rb tree support functions
*/
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node) (rb_parent(node) != node)
-#define RB_CLEAR(node) (rb_set_parent(node, node))
#define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync])
#define rq_rb_key(rq) (rq)->sector
@@ -424,13 +415,13 @@ static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
- if (!ON_RB(&arq->rb_node)) {
+ if (!RB_EMPTY_NODE(&arq->rb_node)) {
WARN_ON(1);
return;
}
rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
- RB_CLEAR(&arq->rb_node);
+ RB_CLEAR_NODE(&arq->rb_node);
}
static struct request *
@@ -551,7 +542,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
struct rb_node *rbprev = rb_prev(&last->rb_node);
struct as_rq *arq_next, *arq_prev;
- BUG_ON(!ON_RB(&last->rb_node));
+ BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
if (rbprev)
arq_prev = rb_entry_arq(rbprev);
@@ -1128,7 +1119,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
struct request *rq = arq->request;
const int data_dir = arq->is_sync;
- BUG_ON(!ON_RB(&arq->rb_node));
+ BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));
as_antic_stop(ad);
ad->antic_status = ANTIC_OFF;
@@ -1253,7 +1244,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
*/
if (reads) {
- BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
+ BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));
if (writes && ad->batch_data_dir == REQ_SYNC)
/*
@@ -1277,7 +1268,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
if (writes) {
dispatch_writes:
- BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
+ BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));
if (ad->batch_data_dir == REQ_SYNC) {
ad->changed_batch = 1;
@@ -1345,7 +1336,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
arq->state = AS_RQ_NEW;
if (rq_data_dir(arq->request) == READ
- || current->flags&PF_SYNCWRITE)
+ || (arq->request->flags & REQ_RW_SYNC))
arq->is_sync = 1;
else
arq->is_sync = 0;
@@ -1597,12 +1588,11 @@ static int as_set_request(request_queue_t *q, struct request *rq,
if (arq) {
memset(arq, 0, sizeof(*arq));
- RB_CLEAR(&arq->rb_node);
+ RB_CLEAR_NODE(&arq->rb_node);
arq->request = rq;
arq->state = AS_RQ_PRESCHED;
arq->io_context = NULL;
- INIT_LIST_HEAD(&arq->hash);
- arq->on_hash = 0;
+ INIT_HLIST_NODE(&arq->hash);
INIT_LIST_HEAD(&arq->fifo);
rq->elevator_private = arq;
return 0;
@@ -1662,7 +1652,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
ad->q = q; /* Identify what queue the data belongs to */
- ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+ ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
@@ -1684,7 +1674,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
INIT_WORK(&ad->antic_work, as_work_handler, q);
for (i = 0; i < AS_HASH_ENTRIES; i++)
- INIT_LIST_HEAD(&ad->hash[i]);
+ INIT_HLIST_HEAD(&ad->hash[i]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
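The conversion above replaces the list_head plus a separate on_hash flag with an hlist_node, where "not on the hash" is encoded in the node itself (a NULL pprev), which is exactly what hlist_unhashed() tests. A simplified user-space model of that idea (not the kernel's hlist implementation):

#include <stdio.h>

/* Simplified model: a NULL pprev means "not on any list", so no extra
 * on_hash flag is needed. */
struct hnode {
        struct hnode *next;
        struct hnode **pprev;
};

struct hhead { struct hnode *first; };

static void init_node(struct hnode *n) { n->next = NULL; n->pprev = NULL; }
static int unhashed(const struct hnode *n) { return !n->pprev; }

static void add_head(struct hnode *n, struct hhead *h)
{
        n->next = h->first;
        if (h->first)
                h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

static void del_init(struct hnode *n)
{
        if (unhashed(n))
                return;
        *n->pprev = n->next;
        if (n->next)
                n->next->pprev = n->pprev;
        init_node(n);
}

int main(void)
{
        struct hhead head = { NULL };
        struct hnode a, b;

        init_node(&a);
        init_node(&b);
        add_head(&a, &head);
        add_head(&b, &head);
        printf("a hashed: %d\n", !unhashed(&a));        /* 1 */
        del_init(&a);
        printf("a hashed: %d\n", !unhashed(&a));        /* 0 */
        printf("head is b: %d\n", head.first == &b);    /* 1 */
        return 0;
}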
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2e6ad0a158e..e25223e147a2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,7 +26,7 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 70;
+static int cfq_slice_idle = HZ / 125;
#define CFQ_IDLE_GRACE (HZ / 10)
#define CFQ_SLICE_SCALE (5)
@@ -60,11 +60,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
/*
* rb-tree defines
*/
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR(node) do { \
- memset(node, 0, sizeof(*node)); \
-} while (0)
-#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
@@ -123,8 +118,6 @@ struct cfq_data {
*/
struct hlist_head *crq_hash;
- unsigned int max_queued;
-
mempool_t *crq_pool;
int rq_in_driver;
@@ -279,8 +272,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi
static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
-#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
-
/*
* lots of deadline iosched dupes, can be abstracted later...
*/
@@ -336,7 +327,7 @@ static int cfq_queue_empty(request_queue_t *q)
static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
{
- if (rw == READ || process_sync(task))
+ if (rw == READ || rw == WRITE_SYNC)
return task->pid;
return CFQ_KEY_ASYNC;
@@ -563,7 +554,7 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
rb_erase(&crq->rb_node, &cfqq->sort_list);
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
cfq_del_cfqq_rr(cfqd, cfqq);
}
@@ -910,13 +901,15 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
return cfqq;
}
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+
static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct cfq_io_context *cic;
unsigned long sl;
- WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+ WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
WARN_ON(cfqq != cfqd->active_queue);
/*
@@ -943,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* fair distribution of slice time for a process doing back-to-back
* seeks. so allow a little bit of time for him to submit a new rq
*/
- if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+ if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
sl = 2;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
@@ -954,11 +947,15 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = crq->cfq_queue;
+ struct request *rq;
cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
cfq_remove_request(crq->request);
cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
elv_dispatch_sort(q, crq->request);
+
+ rq = list_entry(q->queue_head.prev, struct request, queuelist);
+ cfqd->last_sector = rq->sector + rq->nr_sectors;
}
/*
@@ -1040,10 +1037,12 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
* if queue has requests, dispatch one. if not, check if
* enough slice is left to wait for one
*/
- if (!RB_EMPTY(&cfqq->sort_list))
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
goto keep_queue;
- else if (cfq_cfqq_class_sync(cfqq) &&
- time_before(now, cfqq->slice_end)) {
+ else if (cfq_cfqq_dispatched(cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ } else if (cfq_cfqq_class_sync(cfqq)) {
if (cfq_arm_slice_timer(cfqd, cfqq))
return NULL;
}
@@ -1062,7 +1061,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
int dispatched = 0;
- BUG_ON(RB_EMPTY(&cfqq->sort_list));
+ BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
do {
struct cfq_rq *crq;
@@ -1086,14 +1085,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->active_cic = crq->io_context;
}
- if (RB_EMPTY(&cfqq->sort_list))
+ if (RB_EMPTY_ROOT(&cfqq->sort_list))
break;
} while (dispatched < max_dispatch);
/*
- * if slice end isn't set yet, set it. if at least one request was
- * sync, use the sync time slice value
+ * if slice end isn't set yet, set it.
*/
if (!cfqq->slice_end)
cfq_set_prio_slice(cfqd, cfqq);
@@ -1104,7 +1102,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
*/
if ((!cfq_cfqq_sync(cfqq) &&
cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq))
+ cfq_class_idle(cfqq) ||
+ !cfq_cfqq_idle_window(cfqq))
cfq_slice_expired(cfqd, 0);
return dispatched;
@@ -1113,10 +1112,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
static int
cfq_forced_dispatch_cfqqs(struct list_head *list)
{
- int dispatched = 0;
struct cfq_queue *cfqq, *next;
struct cfq_rq *crq;
+ int dispatched;
+ dispatched = 0;
list_for_each_entry_safe(cfqq, next, list, cfq_list) {
while ((crq = cfqq->next_crq)) {
cfq_dispatch_insert(cfqq->cfqd->queue, crq);
@@ -1124,6 +1124,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list)
}
BUG_ON(!list_empty(&cfqq->fifo));
}
+
return dispatched;
}
@@ -1150,7 +1151,8 @@ static int
cfq_dispatch_requests(request_queue_t *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq;
+ struct cfq_queue *cfqq, *prev_cfqq;
+ int dispatched;
if (!cfqd->busy_queues)
return 0;
@@ -1158,10 +1160,17 @@ cfq_dispatch_requests(request_queue_t *q, int force)
if (unlikely(force))
return cfq_forced_dispatch(cfqd);
- cfqq = cfq_select_queue(cfqd);
- if (cfqq) {
+ dispatched = 0;
+ prev_cfqq = NULL;
+ while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
int max_dispatch;
+ /*
+ * Don't repeat dispatch from the previous queue.
+ */
+ if (prev_cfqq == cfqq)
+ break;
+
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
del_timer(&cfqd->idle_slice_timer);
@@ -1170,10 +1179,19 @@ cfq_dispatch_requests(request_queue_t *q, int force)
if (cfq_class_idle(cfqq))
max_dispatch = 1;
- return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+ dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+
+ /*
+ * If the dispatch cfqq has idling enabled and is still
+ * the active queue, break out.
+ */
+ if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
+ break;
+
+ prev_cfqq = cfqq;
}
- return 0;
+ return dispatched;
}
/*
@@ -1379,25 +1397,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
{
struct cfq_data *cfqd = cic->key;
struct cfq_queue *cfqq;
- if (cfqd) {
- spin_lock(cfqd->queue->queue_lock);
- cfqq = cic->cfqq[ASYNC];
- if (cfqq) {
- struct cfq_queue *new_cfqq;
- new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
- cic->ioc->task, GFP_ATOMIC);
- if (new_cfqq) {
- cic->cfqq[ASYNC] = new_cfqq;
- cfq_put_queue(cfqq);
- }
- }
- cfqq = cic->cfqq[SYNC];
- if (cfqq) {
- cfq_mark_cfqq_prio_changed(cfqq);
- cfq_init_prio_data(cfqq);
+
+ if (unlikely(!cfqd))
+ return;
+
+ spin_lock(cfqd->queue->queue_lock);
+
+ cfqq = cic->cfqq[ASYNC];
+ if (cfqq) {
+ struct cfq_queue *new_cfqq;
+ new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+ GFP_ATOMIC);
+ if (new_cfqq) {
+ cic->cfqq[ASYNC] = new_cfqq;
+ cfq_put_queue(cfqq);
}
- spin_unlock(cfqd->queue->queue_lock);
}
+
+ cfqq = cic->cfqq[SYNC];
+ if (cfqq)
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ spin_unlock(cfqd->queue->queue_lock);
}
/*
@@ -1454,7 +1475,6 @@ retry:
INIT_HLIST_NODE(&cfqq->cfq_hash);
INIT_LIST_HEAD(&cfqq->cfq_list);
- RB_CLEAR_ROOT(&cfqq->sort_list);
INIT_LIST_HEAD(&cfqq->fifo);
cfqq->key = key;
@@ -1466,8 +1486,7 @@ retry:
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
- if (!cfqd->hw_tag)
- cfq_mark_cfqq_idle_window(cfqq);
+ cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
cfq_init_prio_data(cfqq);
}
@@ -1658,7 +1677,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
int enable_idle = cfq_cfqq_idle_window(cfqq);
- if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+ (cfqd->hw_tag && CIC_SEEKY(cic)))
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1688,7 +1708,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
return 0;
if (!cfqq)
- return 1;
+ return 0;
if (cfq_class_idle(cfqq))
return 1;
@@ -1720,7 +1740,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
cfqq->slice_end = cfqq->slice_left + jiffies;
- __cfq_slice_expired(cfqd, cfqq, 1);
+ cfq_slice_expired(cfqd, 1);
__cfq_set_active_queue(cfqd, cfqq);
}
@@ -1745,11 +1765,7 @@ static void
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *crq)
{
- struct cfq_io_context *cic;
-
- cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-
- cic = crq->io_context;
+ struct cfq_io_context *cic = crq->io_context;
/*
* we never wait for an async request and we don't allow preemption
@@ -1839,11 +1855,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
cfqq->service_last = now;
cfq_resort_rr_list(cfqq, 0);
}
- cfq_schedule_dispatch(cfqd);
}
- if (cfq_crq_is_sync(crq))
+ if (sync)
crq->io_context->last_end_request = now;
+
+ /*
+ * If this is the active queue, check if it needs to be expired,
+ * or if we want to idle in case it has no pending requests.
+ */
+ if (cfqd->active_queue == cfqq) {
+ if (time_after(now, cfqq->slice_end))
+ cfq_slice_expired(cfqd, 0);
+ else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
+ if (!cfq_arm_slice_timer(cfqd, cfqq))
+ cfq_schedule_dispatch(cfqd);
+ }
+ }
}
static struct request *
@@ -1910,7 +1938,6 @@ static inline int
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct task_struct *task, int rw)
{
-#if 1
if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
!cfq_cfqq_must_alloc_slice(cfqq)) {
cfq_mark_cfqq_must_alloc_slice(cfqq);
@@ -1918,39 +1945,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
return ELV_MQUEUE_MAY;
-#else
- if (!cfqq || task->flags & PF_MEMALLOC)
- return ELV_MQUEUE_MAY;
- if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
- if (cfq_cfqq_wait_request(cfqq))
- return ELV_MQUEUE_MUST;
-
- /*
- * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
- * can quickly flood the queue with writes from a single task
- */
- if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
- cfq_mark_cfqq_must_alloc_slice(cfqq);
- return ELV_MQUEUE_MUST;
- }
-
- return ELV_MQUEUE_MAY;
- }
- if (cfq_class_idle(cfqq))
- return ELV_MQUEUE_NO;
- if (cfqq->allocated[rw] >= cfqd->max_queued) {
- struct io_context *ioc = get_io_context(GFP_ATOMIC);
- int ret = ELV_MQUEUE_NO;
-
- if (ioc && ioc->nr_batch_requests)
- ret = ELV_MQUEUE_MAY;
-
- put_io_context(ioc);
- return ret;
- }
-
- return ELV_MQUEUE_MAY;
-#endif
}
static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
@@ -1979,16 +1973,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- struct request_list *rl = &q->rq;
- if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+ if (unlikely(cfqd->rq_starved)) {
+ struct request_list *rl = &q->rq;
+
smp_mb();
if (waitqueue_active(&rl->wait[READ]))
wake_up(&rl->wait[READ]);
- }
-
- if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
- smp_mb();
if (waitqueue_active(&rl->wait[WRITE]))
wake_up(&rl->wait[WRITE]);
}
@@ -2062,7 +2053,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
if (crq) {
- RB_CLEAR(&crq->rb_node);
+ RB_CLEAR_NODE(&crq->rb_node);
crq->rb_key = 0;
crq->request = rq;
INIT_HLIST_NODE(&crq->hash);
@@ -2148,16 +2139,13 @@ static void cfq_idle_slice_timer(unsigned long data)
* only expire and reinvoke request handler, if there are
* other queues with pending requests
*/
- if (!cfqd->busy_queues) {
- cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
- add_timer(&cfqd->idle_slice_timer);
+ if (!cfqd->busy_queues)
goto out_cont;
- }
/*
* not expired and it has a request pending, let it dispatch
*/
- if (!RB_EMPTY(&cfqq->sort_list)) {
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
cfq_mark_cfqq_must_dispatch(cfqq);
goto out_kick;
}
@@ -2278,9 +2266,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->queue = q;
- cfqd->max_queued = q->nr_requests / 4;
- q->nr_batching = cfq_queued;
-
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
cfqd->idle_slice_timer.data = (unsigned long) cfqd;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c94de8e12fbf..4469dd84623c 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -30,8 +30,7 @@ static const int deadline_hash_shift = 5;
#define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES (1 << deadline_hash_shift)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) list_entry((ptr), struct deadline_rq, hash)
-#define ON_HASH(drq) (drq)->on_hash
+#define ON_HASH(drq) (!hlist_unhashed(&(drq)->hash))
struct deadline_data {
/*
@@ -48,7 +47,7 @@ struct deadline_data {
* next in sort order. read, write or both are NULL
*/
struct deadline_rq *next_drq[2];
- struct list_head *hash; /* request hash */
+ struct hlist_head *hash; /* request hash */
unsigned int batching; /* number of sequential requests made */
sector_t last_sector; /* head position */
unsigned int starved; /* times reads have starved writes */
@@ -79,8 +78,7 @@ struct deadline_rq {
/*
* request hash, key is the ending offset (for back merge lookup)
*/
- struct list_head hash;
- char on_hash;
+ struct hlist_node hash;
/*
* expire fifo
@@ -100,8 +98,7 @@ static kmem_cache_t *drq_pool;
*/
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
- drq->on_hash = 0;
- list_del_init(&drq->hash);
+ hlist_del_init(&drq->hash);
}
static inline void deadline_del_drq_hash(struct deadline_rq *drq)
@@ -117,8 +114,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
BUG_ON(ON_HASH(drq));
- drq->on_hash = 1;
- list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+ hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}
/*
@@ -128,26 +124,24 @@ static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
struct request *rq = drq->request;
- struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
+ struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
- if (ON_HASH(drq) && drq->hash.prev != head) {
- list_del(&drq->hash);
- list_add(&drq->hash, head);
+ if (ON_HASH(drq) && &drq->hash != head->first) {
+ hlist_del(&drq->hash);
+ hlist_add_head(&drq->hash, head);
}
}
static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
- struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
- struct list_head *entry, *next = hash_list->next;
+ struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+ struct hlist_node *entry, *next;
+ struct deadline_rq *drq;
- while ((entry = next) != hash_list) {
- struct deadline_rq *drq = list_entry_hash(entry);
+ hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
struct request *__rq = drq->request;
- next = entry->next;
-
BUG_ON(!ON_HASH(drq));
if (!rq_mergeable(__rq)) {
@@ -165,9 +159,6 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
/*
* rb tree support functions
*/
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node) (rb_parent(node) != node)
-#define RB_CLEAR(node) (rb_set_parent(node, node))
#define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq) (rq)->sector
@@ -226,9 +217,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
dd->next_drq[data_dir] = rb_entry_drq(rbnext);
}
- BUG_ON(!ON_RB(&drq->rb_node));
+ BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
- RB_CLEAR(&drq->rb_node);
+ RB_CLEAR_NODE(&drq->rb_node);
}
static struct request *
@@ -502,7 +493,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
*/
if (reads) {
- BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
if (writes && (dd->starved++ >= dd->writes_starved))
goto dispatch_writes;
@@ -518,7 +509,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
if (writes) {
dispatch_writes:
- BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
dd->starved = 0;
@@ -625,7 +616,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
return NULL;
memset(dd, 0, sizeof(*dd));
- dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+ dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!dd->hash) {
kfree(dd);
@@ -641,7 +632,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
}
for (i = 0; i < DL_HASH_ENTRIES; i++)
- INIT_LIST_HEAD(&dd->hash[i]);
+ INIT_HLIST_HEAD(&dd->hash[i]);
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -674,11 +665,10 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
drq = mempool_alloc(dd->drq_pool, gfp_mask);
if (drq) {
memset(drq, 0, sizeof(*drq));
- RB_CLEAR(&drq->rb_node);
+ RB_CLEAR_NODE(&drq->rb_node);
drq->request = rq;
- INIT_LIST_HEAD(&drq->hash);
- drq->on_hash = 0;
+ INIT_HLIST_NODE(&drq->hash);
INIT_LIST_HEAD(&drq->fifo);
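
Note on the deadline hunks above: the request hash moves from list_head to hlist, which halves the bucket array (hlist_head is a single pointer) and lets hlist_unhashed() replace the separate on_hash flag. A small sketch of the pattern, with an illustrative struct:

	#include <linux/list.h>

	struct hashed_rq {
		struct hlist_node hash;		/* replaces list_head + on_hash */
	};

	static void hashed_rq_init(struct hashed_rq *rq)
	{
		INIT_HLIST_NODE(&rq->hash);	/* starts out unhashed, no flag needed */
	}

	static void hashed_rq_add(struct hlist_head *bucket, struct hashed_rq *rq)
	{
		if (hlist_unhashed(&rq->hash))	/* the new ON_HASH() test */
			hlist_add_head(&rq->hash, bucket);
	}
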
diff --git a/block/elevator.c b/block/elevator.c
index a0afdd317cef..d00b283f31d2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -850,12 +850,9 @@ fail_register:
* one again (along with re-adding the sysfs dir)
*/
elevator_exit(e);
- e = NULL;
q->elevator = old_elevator;
elv_register_queue(q);
clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
- if (e)
- kobject_put(&e->kobj);
return 0;
}
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b7..0603ab2f3692 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -638,7 +638,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
/* Assume anything <= 4GB can be handled by IOMMU.
Actually some IOMMUs can handle everything, but I don't
know of a way to test this here. */
- if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+ if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
#else
@@ -1663,6 +1663,8 @@ static void blk_unplug_timeout(unsigned long data)
**/
void blk_start_queue(request_queue_t *q)
{
+ WARN_ON(!irqs_disabled());
+
clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
/*
@@ -1878,7 +1880,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
* get dealt with eventually.
*
* The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
*
* Function returns a pointer to the initialized request queue, or NULL if
* it didn't succeed.
@@ -2824,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
if (unlikely(bio_barrier(bio)))
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+ if (bio_sync(bio))
+ req->flags |= REQ_RW_SYNC;
+
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
@@ -3359,12 +3365,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
*/
static void blk_done_softirq(struct softirq_action *h)
{
- struct list_head *cpu_list;
- LIST_HEAD(local_list);
+ struct list_head *cpu_list, local_list;
local_irq_disable();
cpu_list = &__get_cpu_var(blk_cpu_done);
- list_splice_init(cpu_list, &local_list);
+ list_replace_init(cpu_list, &local_list);
local_irq_enable();
while (!list_empty(&local_list)) {
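
Note on the ll_rw_blk hunks above: besides the bounce-limit clamp, the irqs-disabled assertion in blk_start_queue() and the REQ_RW_SYNC propagation, blk_done_softirq() now drains the per-CPU completion list with list_replace_init(), which hands every entry to a local head and re-initializes the source in one step. A hedged sketch of that drain pattern (hypothetical handler, not the actual function body):

	#include <linux/list.h>
	#include <linux/interrupt.h>

	static void drain_done_list(struct list_head *cpu_list)
	{
		struct list_head local_list;

		local_irq_disable();
		list_replace_init(cpu_list, &local_list); /* take everything, reset source */
		local_irq_enable();

		while (!list_empty(&local_list)) {
			struct list_head *entry = local_list.next;

			list_del(entry);
			/* complete the request this entry is embedded in */
		}
	}
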
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c24652d31bf9..94b8d820c512 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -10,9 +10,8 @@ menu "ACPI (Advanced Configuration and Power Interface) Support"
config ACPI
bool "ACPI Support"
depends on IA64 || X86
+ depends on PCI
select PM
- select PCI
-
default y
---help---
Advanced Configuration and Power Interface (ACPI) support for
@@ -162,7 +161,7 @@ config ACPI_THERMAL
config ACPI_NUMA
bool "NUMA support"
depends on NUMA
- depends on (IA64 || X86_64)
+ depends on (X86 || IA64)
default y if IA64_GENERIC || IA64_SGI_SN2
config ACPI_ASUS
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index d882bf87fa96..e0a95ba72371 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -74,7 +74,7 @@ struct acpi_memory_device {
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
u64 start_addr; /* Memory Range start physical addr */
- u64 end_addr; /* Memory Range end physical addr */
+ u64 length; /* Memory Range length */
};
static int
@@ -97,12 +97,11 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
if (ACPI_SUCCESS(status)) {
if (address64.resource_type == ACPI_MEMORY_RANGE) {
/* Populate the structure */
- mem_device->caching =
- address64.info.mem.caching;
+ mem_device->caching = address64.info.mem.caching;
mem_device->write_protect =
address64.info.mem.write_protect;
mem_device->start_addr = address64.minimum;
- mem_device->end_addr = address64.maximum;
+ mem_device->length = address64.address_length;
}
}
@@ -199,8 +198,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
*/
- result = add_memory(mem_device->start_addr,
- (mem_device->end_addr - mem_device->start_addr) + 1);
+ result = add_memory(mem_device->start_addr, mem_device->length);
if (result) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
mem_device->state = MEMORY_INVALID_STATE;
@@ -249,7 +247,7 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
int result;
u64 start = mem_device->start_addr;
- u64 len = mem_device->end_addr - start + 1;
+ u64 len = mem_device->length;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index f4c87750dbf2..839f423d738d 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -817,7 +817,7 @@ typedef int (proc_writefunc) (struct file * file, const char __user * buffer,
unsigned long count, void *data);
static int
-__init asus_proc_add(char *name, proc_writefunc * writefunc,
+asus_proc_add(char *name, proc_writefunc * writefunc,
proc_readfunc * readfunc, mode_t mode,
struct acpi_device *device)
{
@@ -836,7 +836,7 @@ __init asus_proc_add(char *name, proc_writefunc * writefunc,
return 0;
}
-static int __init asus_hotk_add_fs(struct acpi_device *device)
+static int asus_hotk_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *proc;
mode_t mode;
@@ -954,7 +954,7 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
* This function is used to initialize the hotk with right values. In this
* method, we can make all the detection we want, and modify the hotk struct
*/
-static int __init asus_hotk_get_info(void)
+static int asus_hotk_get_info(void)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -970,7 +970,7 @@ static int __init asus_hotk_get_info(void)
* HID), this bit will be moved. A global variable asus_info contains
* the DSDT header.
*/
- status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt);
+ status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
if (ACPI_FAILURE(status))
printk(KERN_WARNING " Couldn't get the DSDT table header\n");
else
@@ -1101,7 +1101,7 @@ static int __init asus_hotk_get_info(void)
return AE_OK;
}
-static int __init asus_hotk_check(void)
+static int asus_hotk_check(void)
{
int result = 0;
@@ -1119,7 +1119,9 @@ static int __init asus_hotk_check(void)
return result;
}
-static int __init asus_hotk_add(struct acpi_device *device)
+static int asus_hotk_found;
+
+static int asus_hotk_add(struct acpi_device *device)
{
acpi_status status = AE_OK;
int result;
@@ -1180,6 +1182,8 @@ static int __init asus_hotk_add(struct acpi_device *device)
}
}
+ asus_hotk_found = 1;
+
end:
if (result) {
kfree(hotk);
@@ -1226,12 +1230,24 @@ static int __init asus_acpi_init(void)
asus_proc_dir->owner = THIS_MODULE;
result = acpi_bus_register_driver(&asus_hotk_driver);
- if (result < 1) {
- acpi_bus_unregister_driver(&asus_hotk_driver);
+ if (result < 0) {
remove_proc_entry(PROC_ASUS, acpi_root_dir);
return -ENODEV;
}
+ /*
+ * This is a bit of a kludge. We only want this module loaded
+ * for ASUS systems, but there's currently no way to probe the
+ * ACPI namespace for ASUS HIDs. So we just return failure if
+ * we didn't find one, which will cause the module to be
+ * unloaded.
+ */
+ if (!asus_hotk_found) {
+ acpi_bus_unregister_driver(&asus_hotk_driver);
+ remove_proc_entry(PROC_ASUS, acpi_root_dir);
+ return result;
+ }
+
return 0;
}
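
Note on the asus_acpi hunks above: __init is dropped from these routines because they are reached through the driver's .add callback, which can be invoked outside the module's init path, and the new asus_hotk_found flag lets the module back out cleanly on non-ASUS systems. Minimal shape of the rule, as a hypothetical driver skeleton (not asus_acpi itself):

	#include <acpi/acpi_bus.h>

	static int example_add(struct acpi_device *device)	/* no __init here */
	{
		return 0;
	}

	static struct acpi_driver example_driver = {
		.name = "example",
		.ops = {
			.add = example_add,
		},
	};
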
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 606f8733a776..dd3983cece92 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -43,7 +43,7 @@ ACPI_MODULE_NAME("acpi_bus")
extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
#endif
-FADT_DESCRIPTOR acpi_fadt;
+struct fadt_descriptor acpi_fadt;
EXPORT_SYMBOL(acpi_fadt);
struct acpi_device *acpi_root;
@@ -205,12 +205,14 @@ int acpi_bus_set_power(acpi_handle handle, int state)
* Get device's current power state if it's unknown
* This means device power state isn't initialized or previous setting failed
*/
- if (device->power.state == ACPI_STATE_UNKNOWN)
- acpi_bus_get_power(device->handle, &device->power.state);
- if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
- state));
- return_VALUE(0);
+ if (!device->flags.force_power_state) {
+ if (device->power.state == ACPI_STATE_UNKNOWN)
+ acpi_bus_get_power(device->handle, &device->power.state);
+ if (state == device->power.state) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
+ state));
+ return_VALUE(0);
+ }
}
if (!device->power.states[state].flags.valid) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Device does not support D%d\n",
@@ -596,6 +598,8 @@ void __init acpi_early_init(void)
if (acpi_disabled)
return_VOID;
+ printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
+
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
@@ -617,7 +621,7 @@ void __init acpi_early_init(void)
/*
* Get a separate copy of the FADT for use by other drivers.
*/
- status = acpi_get_table(ACPI_TABLE_FADT, 1, &buffer);
+ status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get the FADT\n");
goto error0;
@@ -743,8 +747,6 @@ static int __init acpi_init(void)
ACPI_FUNCTION_TRACE("acpi_init");
- printk(KERN_INFO PREFIX "Subsystem revision %08x\n", ACPI_CA_VERSION);
-
if (acpi_disabled) {
printk(KERN_INFO PREFIX "Interpreter disabled.\n");
return_VALUE(-ENODEV);
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index 76bc0463f6de..a6d77efb41a0 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -87,7 +87,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
union acpi_operand_object *second_desc = NULL;
u32 flags;
- ACPI_FUNCTION_TRACE("ds_create_buffer_field");
+ ACPI_FUNCTION_TRACE(ds_create_buffer_field);
/* Get the name_string argument */
@@ -210,7 +210,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
acpi_status status;
acpi_integer position;
- ACPI_FUNCTION_TRACE_PTR("ds_get_field_names", info);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
/* First field starts at bit zero */
@@ -342,7 +342,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_field", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@@ -399,7 +399,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_namespace_node *node;
u8 type = 0;
- ACPI_FUNCTION_TRACE_PTR("ds_init_field_objects", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
switch (walk_state->opcode) {
case AML_FIELD_OP:
@@ -425,6 +425,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
* Walk the list of entries in the field_list
*/
while (arg) {
+
/* Ignore OFFSET and ACCESSAS terms here */
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
@@ -481,7 +482,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_bank_field", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_bank_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@@ -554,7 +555,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_index_field", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_index_field, op);
/* First arg is the name of the Index register (must already exist) */
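
Note on the dsfield hunks above (and the matching conversions in the dispatcher files that follow): ACPI_FUNCTION_TRACE and friends now take the bare function-name token rather than a quoted string, so the macro itself can produce the string. An illustrative, non-ACPICA definition of the idea:

	/* Illustrative only -- not ACPICA's actual macro. */
	#define EXAMPLE_FUNCTION_TRACE(name) \
		const char *_fname = #name	/* the token is stringified once */

	static void example(void)
	{
		EXAMPLE_FUNCTION_TRACE(example);	/* was: ...TRACE("example") */
		(void)_fname;
	}
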
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index e65a07ad2422..bbdf990e9f65 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -184,7 +184,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
*
* RETURN: Status
*
- * DESCRIPTION: Walk the namespace starting at "start_node" and perform any
+ * DESCRIPTION: Walk the namespace starting at "StartNode" and perform any
* necessary initialization on the objects found therein
*
******************************************************************************/
@@ -196,7 +196,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
acpi_status status;
struct acpi_init_walk_info info;
- ACPI_FUNCTION_TRACE("ds_initialize_objects");
+ ACPI_FUNCTION_TRACE(ds_initialize_objects);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
@@ -213,7 +213,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
acpi_ds_init_one_object, &info, NULL);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
+ ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index c475546535b6..bc9aca4e7401 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -81,6 +81,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
/* Invoke the global exception handler */
if (acpi_gbl_exception_handler) {
+
/* Exit the interpreter, allow handler to execute methods */
acpi_ex_exit_interpreter();
@@ -100,6 +101,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
}
#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
+
/* Display method locals/args if disassembler is present */
acpi_dm_dump_method_info(status, walk_state, walk_state->op);
@@ -132,7 +134,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ds_begin_method_execution", method_node);
+ ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
if (!method_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
@@ -168,11 +170,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
/*
* Get a unit from the method semaphore. This releases the
- * interpreter if we block
+ * interpreter if we block (then reacquires it)
*/
status =
acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
}
/*
@@ -183,7 +188,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
if (!obj_desc->method.owner_id) {
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto cleanup;
}
}
@@ -193,6 +198,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
*/
obj_desc->method.thread_count++;
return_ACPI_STATUS(status);
+
+ cleanup:
+ /* On error, must signal the method semaphore if present */
+
+ if (obj_desc->method.semaphore) {
+ (void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1);
+ }
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -218,10 +231,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_namespace_node *method_node;
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ds_call_control_method", this_walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Execute method %p, currentstate=%p\n",
@@ -240,25 +253,31 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
- /* Init for new method, wait on concurrency semaphore */
+ /* Init for new method, possibly wait on concurrency semaphore */
status = acpi_ds_begin_method_execution(method_node, obj_desc,
this_walk_state->method_node);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ return_ACPI_STATUS(status);
}
+ /*
+ * 1) Parse the method. All "normal" methods are parsed for each execution.
+ * Internal methods (_OSI, etc.) do not require parsing.
+ */
if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
- /* 1) Parse: Create a new walk state for the preempting walk */
+
+ /* Create a new walk state for the parse */
next_walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
obj_desc, NULL);
if (!next_walk_state) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
- /* Create and init a Root Node */
+ /* Create and init a parse tree root */
op = acpi_ps_create_scope_op();
if (!op) {
@@ -271,17 +290,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
obj_desc->method.aml_length,
NULL, 1);
if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(next_walk_state);
+ acpi_ps_delete_parse_tree(op);
goto cleanup;
}
- /* Begin AML parse */
+ /* Begin AML parse (deletes next_walk_state) */
status = acpi_ps_parse_aml(next_walk_state);
acpi_ps_delete_parse_tree(op);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
}
- /* 2) Execute: Create a new state for the preempting walk */
+ /* 2) Begin method execution. Create a new walk state */
next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
NULL, obj_desc, thread);
@@ -289,6 +311,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
status = AE_NO_MEMORY;
goto cleanup;
}
+
/*
* The resolved arguments were put on the previous walk state's operand
* stack. Operands on the previous walk state stack always
@@ -296,12 +319,24 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
*/
this_walk_state->operands[this_walk_state->num_operands] = NULL;
- info.parameters = &this_walk_state->operands[0];
- info.parameter_type = ACPI_PARAM_ARGS;
+ /*
+ * Allocate and initialize the evaluation information block
+ * TBD: this is somewhat inefficient, should change interface to
+ * ds_init_aml_walk. For now, keeps this struct off the CPU stack
+ */
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->parameters = &this_walk_state->operands[0];
+ info->parameter_type = ACPI_PARAM_ARGS;
status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
obj_desc->method.aml_start,
- obj_desc->method.aml_length, &info, 3);
+ obj_desc->method.aml_length, info, 3);
+
+ ACPI_FREE(info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -323,6 +358,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
"Starting nested execution, newstate=%p\n",
next_walk_state));
+ /* Invoke an internal method if necessary */
+
if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
status = obj_desc->method.implementation(next_walk_state);
}
@@ -330,16 +367,14 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
cleanup:
- /* Decrement the thread count on the method parse tree */
- if (next_walk_state && (next_walk_state->method_desc)) {
- next_walk_state->method_desc->method.thread_count--;
- }
+ /* On error, we must terminate the method properly */
- /* On error, we must delete the new walk state */
+ acpi_ds_terminate_control_method(obj_desc, next_walk_state);
+ if (next_walk_state) {
+ acpi_ds_delete_walk_state(next_walk_state);
+ }
- acpi_ds_terminate_control_method(next_walk_state);
- acpi_ds_delete_walk_state(next_walk_state);
return_ACPI_STATUS(status);
}
@@ -362,25 +397,33 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
union acpi_operand_object *return_desc)
{
acpi_status status;
+ int same_as_implicit_return;
- ACPI_FUNCTION_TRACE_PTR("ds_restart_control_method", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "****Restart [%4.4s] Op %p return_value_from_callee %p\n",
+ "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
(char *)&walk_state->method_node->name,
walk_state->method_call_op, return_desc));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- " return_from_this_method_used?=%X res_stack %p Walk %p\n",
+ " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
walk_state->return_used,
walk_state->results, walk_state));
/* Did the called method return a value? */
if (return_desc) {
+
+ /* Is the implicit return object the same as the return desc? */
+
+ same_as_implicit_return =
+ (walk_state->implicit_return_obj == return_desc);
+
/* Are we actually going to use the return value? */
if (walk_state->return_used) {
+
/* Save the return value from the previous method */
status = acpi_ds_result_push(return_desc, walk_state);
@@ -397,18 +440,23 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
}
/*
- * The following code is the
- * optional support for a so-called "implicit return". Some AML code
- * assumes that the last value of the method is "implicitly" returned
- * to the caller. Just save the last result as the return value.
+ * The following code is the optional support for the so-called
+ * "implicit return". Some AML code assumes that the last value of the
+ * method is "implicitly" returned to the caller, in the absence of an
+ * explicit return value.
+ *
+ * Just save the last result of the method as the return value.
+ *
* NOTE: this is optional because the ASL language does not actually
* support this behavior.
*/
else if (!acpi_ds_do_implicit_return
- (return_desc, walk_state, FALSE)) {
+ (return_desc, walk_state, FALSE)
+ || same_as_implicit_return) {
/*
* Delete the return value if it will not be used by the
- * calling method
+ * calling method or remove one reference if the explicit return
+ * is the same as the implicit return value.
*/
acpi_ut_remove_reference(return_desc);
}
@@ -421,7 +469,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
* FUNCTION: acpi_ds_terminate_control_method
*
- * PARAMETERS: walk_state - State of the method
+ * PARAMETERS: method_desc - Method object
+ * walk_state - State associated with the method
*
* RETURN: None
*
@@ -431,95 +480,100 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
******************************************************************************/
-void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+ struct acpi_walk_state *walk_state)
{
- union acpi_operand_object *obj_desc;
struct acpi_namespace_node *method_node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_terminate_control_method", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
- if (!walk_state) {
- return_VOID;
- }
+ /* method_desc is required, walk_state is optional */
- /* The current method object was saved in the walk state */
-
- obj_desc = walk_state->method_desc;
- if (!obj_desc) {
+ if (!method_desc) {
return_VOID;
}
- /* Delete all arguments and locals */
+ if (walk_state) {
- acpi_ds_method_data_delete_all(walk_state);
+ /* Delete all arguments and locals */
+
+ acpi_ds_method_data_delete_all(walk_state);
+ }
/*
* Lock the parser while we terminate this method.
* If this is the last thread executing the method,
* we have additional cleanup to perform
*/
- status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
if (ACPI_FAILURE(status)) {
return_VOID;
}
/* Signal completion of the execution of this method if necessary */
- if (walk_state->method_desc->method.semaphore) {
+ if (method_desc->method.semaphore) {
status =
- acpi_os_signal_semaphore(walk_state->method_desc->method.
- semaphore, 1);
+ acpi_os_signal_semaphore(method_desc->method.semaphore, 1);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not signal method semaphore"));
- /* Ignore error and continue cleanup */
+ /* Ignore error and continue */
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not signal method semaphore"));
}
}
- /*
- * There are no more threads executing this method. Perform
- * additional cleanup.
- *
- * The method Node is stored in the walk state
- */
- method_node = walk_state->method_node;
+ if (walk_state) {
+ /*
+ * Delete any objects created by this method during execution.
+ * The method Node is stored in the walk state
+ */
+ method_node = walk_state->method_node;
- /* Lock namespace for possible update */
+ /* Lock namespace for possible update */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto exit;
- }
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
- /*
- * Delete any namespace entries created immediately underneath
- * the method
- */
- if (method_node->child) {
- acpi_ns_delete_namespace_subtree(method_node);
+ /*
+ * Delete any namespace entries created immediately underneath
+ * the method
+ */
+ if (method_node && method_node->child) {
+ acpi_ns_delete_namespace_subtree(method_node);
+ }
+
+ /*
+ * Delete any namespace entries created anywhere else within
+ * the namespace by the execution of this method
+ */
+ acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
- /*
- * Delete any namespace entries created anywhere else within
- * the namespace by the execution of this method
- */
- acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
- owner_id);
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ /* Decrement the thread count on the method */
+
+ if (method_desc->method.thread_count) {
+ method_desc->method.thread_count--;
+ } else {
+ ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
+ }
/* Are there any other threads currently executing this method? */
- if (walk_state->method_desc->method.thread_count) {
+ if (method_desc->method.thread_count) {
/*
* Additional threads. Do not release the owner_id in this case,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"*** Completed execution of one thread, %d threads remaining\n",
- walk_state->method_desc->method.
- thread_count));
+ method_desc->method.thread_count));
} else {
/* This is the only executing thread for this method */
@@ -533,22 +587,20 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
* This code is here because we must wait until the last thread exits
* before creating the synchronization semaphore.
*/
- if ((walk_state->method_desc->method.concurrency == 1) &&
- (!walk_state->method_desc->method.semaphore)) {
+ if ((method_desc->method.concurrency == 1) &&
+ (!method_desc->method.semaphore)) {
status = acpi_os_create_semaphore(1, 1,
- &walk_state->
- method_desc->method.
+ &method_desc->method.
semaphore);
}
/* No more threads, we can free the owner_id */
- acpi_ut_release_owner_id(&walk_state->method_desc->method.
- owner_id);
+ acpi_ut_release_owner_id(&method_desc->method.owner_id);
}
exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
+ (void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
return_VOID;
}
@@ -581,7 +633,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
+ ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node);
/* Parameter Validation */
@@ -590,7 +642,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** Parsing [%4.4s] **** named_obj=%p\n",
+ "**** Parsing [%4.4s] **** NamedObj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */
@@ -669,7 +721,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
+ "**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n",
acpi_ut_get_node_name(node), node, op));
/*
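
Note on the dsmethod hunks above: the rework tightens the method error paths -- once a unit of the method's serialization semaphore has been taken, every failure exit must signal it back, and acpi_ds_terminate_control_method() now receives the method object directly so it can run even without a walk state. Condensed from the hunks (same ACPICA calls, error handling inlined for brevity):

	status = acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
					       ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);	/* nothing held yet, plain return */

	status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
	if (ACPI_FAILURE(status)) {
		/* already hold a unit -- give it back before bailing out */
		(void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1);
		return_ACPI_STATUS(status);
	}
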
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index c025674f938b..459160ff9058 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -81,7 +81,7 @@ acpi_ds_method_data_get_type(u16 opcode,
* special data types.
*
* NOTES: walk_state fields are initialized to zero by the
- * ACPI_MEM_CALLOCATE().
+ * ACPI_ALLOCATE_ZEROED().
*
* A pseudo-Namespace Node is assigned to each argument and local
* so that ref_of() can return a pointer to the Node.
@@ -92,7 +92,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
{
u32 i;
- ACPI_FUNCTION_TRACE("ds_method_data_init");
+ ACPI_FUNCTION_TRACE(ds_method_data_init);
/* Init the method arguments */
@@ -100,10 +100,10 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
NAMEOF_ARG_NTE);
walk_state->arguments[i].name.integer |= (i << 24);
- walk_state->arguments[i].descriptor = ACPI_DESC_TYPE_NAMED;
+ walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].type = ACPI_TYPE_ANY;
- walk_state->arguments[i].flags = ANOBJ_END_OF_PEER_LIST |
- ANOBJ_METHOD_ARG;
+ walk_state->arguments[i].flags =
+ ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
}
/* Init the method locals */
@@ -113,11 +113,11 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
NAMEOF_LOCAL_NTE);
walk_state->local_variables[i].name.integer |= (i << 24);
- walk_state->local_variables[i].descriptor =
+ walk_state->local_variables[i].descriptor_type =
ACPI_DESC_TYPE_NAMED;
walk_state->local_variables[i].type = ACPI_TYPE_ANY;
- walk_state->local_variables[i].flags = ANOBJ_END_OF_PEER_LIST |
- ANOBJ_METHOD_LOCAL;
+ walk_state->local_variables[i].flags =
+ ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
}
return_VOID;
@@ -140,7 +140,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
{
u32 index;
- ACPI_FUNCTION_TRACE("ds_method_data_delete_all");
+ ACPI_FUNCTION_TRACE(ds_method_data_delete_all);
/* Detach the locals */
@@ -199,7 +199,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
acpi_status status;
u32 index = 0;
- ACPI_FUNCTION_TRACE_PTR("ds_method_data_init_args", params);
+ ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);
if (!params) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -251,7 +251,7 @@ acpi_ds_method_data_get_node(u16 opcode,
struct acpi_walk_state *walk_state,
struct acpi_namespace_node **node)
{
- ACPI_FUNCTION_TRACE("ds_method_data_get_node");
+ ACPI_FUNCTION_TRACE(ds_method_data_get_node);
/*
* Method Locals and Arguments are supported
@@ -318,10 +318,10 @@ acpi_ds_method_data_set_value(u16 opcode,
acpi_status status;
struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("ds_method_data_set_value");
+ ACPI_FUNCTION_TRACE(ds_method_data_set_value);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "new_obj %p Opcode %X, Refs=%d [%s]\n", object,
+ "NewObj %p Opcode %X, Refs=%d [%s]\n", object,
opcode, object->common.reference_count,
acpi_ut_get_type_name(object->common.type)));
@@ -336,7 +336,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* Increment ref count so object can't be deleted while installed.
* NOTE: We do not copy the object in order to preserve the call by
* reference semantics of ACPI Control Method invocation.
- * (See ACPI specification 2.0_c)
+ * (See ACPI Specification 2.0_c)
*/
acpi_ut_add_reference(object);
@@ -351,7 +351,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_get_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
- * Index - which local_var or argument to get
+ * Index - Which local_var or argument to get
* walk_state - Current walk state object
* dest_desc - Where Arg or Local value is returned
*
@@ -372,7 +372,7 @@ acpi_ds_method_data_get_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ds_method_data_get_value");
+ ACPI_FUNCTION_TRACE(ds_method_data_get_value);
/* Validate the object descriptor */
@@ -459,7 +459,7 @@ acpi_ds_method_data_get_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_delete_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
- * Index - which local_var or argument to delete
+ * Index - Which local_var or argument to delete
* walk_state - Current walk state object
*
* RETURN: None
@@ -477,7 +477,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ds_method_data_delete_value");
+ ACPI_FUNCTION_TRACE(ds_method_data_delete_value);
/* Get the namespace node for the arg/local */
@@ -538,7 +538,7 @@ acpi_ds_store_object_to_local(u16 opcode,
union acpi_operand_object *current_obj_desc;
union acpi_operand_object *new_obj_desc;
- ACPI_FUNCTION_TRACE("ds_store_object_to_local");
+ ACPI_FUNCTION_TRACE(ds_store_object_to_local);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Opcode=%X Index=%d Obj=%p\n",
opcode, index, obj_desc));
@@ -614,7 +614,7 @@ acpi_ds_store_object_to_local(u16 opcode,
&& (current_obj_desc->reference.opcode ==
AML_REF_OF_OP)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Arg (%p) is an obj_ref(Node), storing in node %p\n",
+ "Arg (%p) is an ObjRef(Node), storing in node %p\n",
new_obj_desc,
current_obj_desc));
@@ -688,7 +688,7 @@ acpi_ds_method_data_get_type(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ds_method_data_get_type");
+ ACPI_FUNCTION_TRACE(ds_method_data_get_type);
/* Get the namespace node for the arg/local */
@@ -701,6 +701,7 @@ acpi_ds_method_data_get_type(u16 opcode,
object = acpi_ns_get_attached_object(node);
if (!object) {
+
/* Uninitialized local/arg, return TYPE_ANY */
return_VALUE(ACPI_TYPE_ANY);
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 8b21f0f9e517..72190abb1d59 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -81,7 +81,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ds_build_internal_object");
+ ACPI_FUNCTION_TRACE(ds_build_internal_object);
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
@@ -103,6 +103,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
common.
node)));
if (ACPI_FAILURE(status)) {
+
/* Check if we are resolving a named reference within a package */
if ((status == AE_NOT_FOUND)
@@ -186,7 +187,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *byte_list;
u32 byte_list_length = 0;
- ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj");
+ ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
/*
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
@@ -195,6 +196,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
*/
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
+
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
@@ -243,7 +245,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
"Buffer defined with zero length in AML, creating\n"));
} else {
obj_desc->buffer.pointer =
- ACPI_MEM_CALLOCATE(obj_desc->buffer.length);
+ ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
if (!obj_desc->buffer.pointer) {
acpi_ut_delete_object_desc(obj_desc);
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -291,7 +293,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ds_build_internal_package_obj");
+ ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
/* Find the parent of a possibly nested package */
@@ -339,9 +341,10 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* individual objects). Add an extra pointer slot so
* that the list is always null terminated.
*/
- obj_desc->package.elements = ACPI_MEM_CALLOCATE(((acpi_size) obj_desc->
- package.count +
- 1) * sizeof(void *));
+ obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ obj_desc->package.
+ count +
+ 1) * sizeof(void *));
if (!obj_desc->package.elements) {
acpi_ut_delete_object_desc(obj_desc);
@@ -355,6 +358,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
arg = arg->common.next;
for (i = 0; arg; i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+
/* Object (package or buffer) is already built */
obj_desc->package.elements[i] =
@@ -396,7 +400,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
acpi_status status;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE_PTR("ds_create_node", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
/*
* Because of the execution pass through the non-control-method
@@ -408,6 +412,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
}
if (!op->common.value.arg) {
+
/* No arguments, there is nothing to do */
return_ACPI_STATUS(AE_OK);
@@ -464,11 +469,12 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ds_init_object_from_op");
+ ACPI_FUNCTION_TRACE(ds_init_object_from_op);
obj_desc = *ret_obj_desc;
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
+
/* Unknown opcode */
return_ACPI_STATUS(AE_TYPE);
@@ -626,6 +632,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default: /* Other literals, etc.. */
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 6229c10674e1..5b974a8fe614 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -91,7 +91,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ds_execute_arguments");
+ ACPI_FUNCTION_TRACE(ds_execute_arguments);
/*
* Allocate a new parser op to be the root of the parsed tree
@@ -193,7 +193,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_get_buffer_field_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -206,7 +206,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_BUFFER_FIELD, node, NULL));
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] buffer_field Arg Init\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
acpi_ut_get_node_name(node)));
/* Execute the AML code for the term_arg arguments */
@@ -235,7 +235,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_get_buffer_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -279,7 +279,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_get_package_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -324,7 +324,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
acpi_status status;
union acpi_operand_object *extra_desc;
- ACPI_FUNCTION_TRACE_PTR("ds_get_region_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -342,8 +342,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_REGION, node, NULL));
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[%4.4s] op_region Arg Init at AML %p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
acpi_ut_get_node_name(node),
extra_desc->extra.aml_start));
@@ -352,6 +351,28 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Validate the region address/length via the host OS */
+
+ status = acpi_os_validate_address(obj_desc->region.space_id,
+ obj_desc->region.address,
+ (acpi_size) obj_desc->region.length);
+ if (ACPI_FAILURE(status)) {
+ /*
+ * Invalid address/length. We will emit an error message and mark
+ * the region as invalid, so that it will cause an additional error if
+ * it is ever used. Then return AE_OK.
+ */
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During address validation of OpRegion [%4.4s]",
+ node->name.ascii));
+ obj_desc->common.flags |= AOPOBJ_INVALID;
+ status = AE_OK;
+ }
+
return_ACPI_STATUS(status);
}
@@ -411,7 +432,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
u8 field_flags;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_init_buffer_field", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_init_buffer_field, obj_desc);
/* Host object must be a Buffer */
@@ -457,7 +478,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
if (bit_count == 0) {
ACPI_ERROR((AE_INFO,
- "Attempt to create_field of length zero"));
+ "Attempt to CreateField of length zero"));
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
@@ -595,7 +616,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
- ACPI_FUNCTION_TRACE_PTR("ds_eval_buffer_field_operands", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_buffer_field_operands, op);
/*
* This is where we evaluate the address and length fields of the
@@ -627,7 +648,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state->num_operands,
- "after acpi_ex_resolve_operands");
+ "after AcpiExResolveOperands");
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
@@ -640,6 +661,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
/* Initialize the Buffer Field */
if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
+
/* NOTE: Slightly different operands for this opcode */
status =
@@ -685,7 +707,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
- ACPI_FUNCTION_TRACE_PTR("ds_eval_region_operands", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op);
/*
* This is where we evaluate the address and length fields of the
@@ -718,7 +740,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
acpi_ps_get_opcode_name(op->common.aml_opcode),
- 1, "after acpi_ex_resolve_operands");
+ 1, "after AcpiExResolveOperands");
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
@@ -744,7 +766,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
operand_desc->integer.value;
acpi_ut_remove_reference(operand_desc);
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "rgn_obj %p Addr %8.8X%8.8X Len %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
@@ -780,7 +802,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
union acpi_operand_object *arg_desc;
u32 length;
- ACPI_FUNCTION_TRACE("ds_eval_data_object_operands");
+ ACPI_FUNCTION_TRACE(ds_eval_data_object_operands);
/* The first operand (for all of these data objects) is the length */
@@ -874,7 +896,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
- ACPI_FUNCTION_NAME("ds_exec_begin_control_op");
+ ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
op->common.aml_opcode, walk_state));
@@ -952,7 +974,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
- ACPI_FUNCTION_NAME("ds_exec_end_control_op");
+ ACPI_FUNCTION_NAME(ds_exec_end_control_op);
switch (op->common.aml_opcode) {
case AML_IF_OP:
@@ -984,6 +1006,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
if (walk_state->control_state->common.value) {
+
/* Predicate was true, go back and evaluate it again! */
status = AE_CTRL_PENDING;
@@ -1014,6 +1037,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
* has been bubbled up the tree
*/
if (op->common.value.arg) {
+
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
@@ -1047,6 +1071,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
walk_state->return_desc = walk_state->operands[0];
} else if ((walk_state->results) &&
(walk_state->results->results.num_results > 0)) {
+
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
@@ -1095,7 +1120,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
}
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Completed RETURN_OP State=%p, ret_val=%p\n",
+ "Completed RETURN_OP State=%p, RetVal=%p\n",
walk_state, walk_state->return_desc));
/* End the control method execution right now */
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index 53356a591ac1..05230baf5de8 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -68,7 +68,7 @@ ACPI_MODULE_NAME("dsutils")
******************************************************************************/
void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
{
- ACPI_FUNCTION_NAME("ds_clear_implicit_return");
+ ACPI_FUNCTION_NAME(ds_clear_implicit_return);
/*
* Slack must be enabled for this feature
@@ -115,7 +115,7 @@ u8
acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
struct acpi_walk_state *walk_state, u8 add_reference)
{
- ACPI_FUNCTION_NAME("ds_do_implicit_return");
+ ACPI_FUNCTION_NAME(ds_do_implicit_return);
/*
* Slack must be enabled for this feature, and we must
@@ -171,7 +171,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
{
const struct acpi_opcode_info *parent_info;
- ACPI_FUNCTION_TRACE_PTR("ds_is_result_used", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_is_result_used, op);
/* Must have both an Op and a Result Object */
@@ -202,6 +202,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*/
if ((!op->common.parent) ||
(op->common.parent->common.aml_opcode == AML_SCOPE_OP)) {
+
/* No parent, the return value cannot possibly be used */
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -340,7 +341,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj);
+ ACPI_FUNCTION_TRACE_PTR(ds_delete_result_if_not_used, result_obj);
if (!op) {
ACPI_ERROR((AE_INFO, "Null Op"));
@@ -352,6 +353,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
}
if (!acpi_ds_is_result_used(op, walk_state)) {
+
/* Must pop the result stack (obj_desc should be equal to result_obj) */
status = acpi_ds_result_pop(&obj_desc, walk_state);
@@ -382,7 +384,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
u32 i;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ds_resolve_operands", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_resolve_operands, walk_state);
/*
* Attempt to resolve each of the valid operands
@@ -417,7 +419,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
{
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ds_clear_operands", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_clear_operands, walk_state);
/* Remove a reference on each operand on the stack */
@@ -465,7 +467,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
acpi_interpreter_mode interpreter_mode;
const struct acpi_opcode_info *op_info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_operand", arg);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_operand, arg);
/* A valid name must be looked up in the namespace */
@@ -498,7 +500,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
*/
if ((walk_state->deferred_node) &&
(walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
- && (arg_index != 0)) {
+ && (arg_index ==
+ (u32) ((walk_state->opcode ==
+ AML_CREATE_FIELD_OP) ? 3 : 2))) {
obj_desc =
ACPI_CAST_PTR(union acpi_operand_object,
walk_state->deferred_node);
@@ -521,6 +525,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
&& (parent_op->common.aml_opcode != AML_REGION_OP)
&& (parent_op->common.aml_opcode !=
AML_INT_NAMEPATH_OP)) {
+
/* Enter name into namespace if not found */
interpreter_mode = ACPI_IMODE_LOAD_PASS2;
@@ -572,7 +577,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/* Free the namestring created above */
- ACPI_MEM_FREE(name_string);
+ ACPI_FREE(name_string);
/* Check status from the lookup */
@@ -696,7 +701,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arg;
u32 arg_count = 0;
- ACPI_FUNCTION_TRACE_PTR("ds_create_operands", first_arg);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
/* For all arguments in the list... */
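
Note on the dsutils hunk above: the deferred-node test in acpi_ds_create_operand() is narrowed to the one argument that actually names the buffer field. The index the new expression encodes follows the ASL operand layout:

	/* FieldName operand index, as the new test computes it:
	 *   CreateField     (SourceBuf, BitIndex, NumBits, FieldName) -> index 3
	 *   CreateDWordField(SourceBuf, ByteIndex,          FieldName) -> index 2 */
	static u32 field_name_arg_index(u16 aml_opcode)
	{
		return (aml_opcode == AML_CREATE_FIELD_OP) ? 3 : 2;
	}
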
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index f1af655ff113..3acbd9145d72 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -49,7 +49,6 @@
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acdebug.h>
-#include <acpi/acdisasm.h>
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswexec")
@@ -93,7 +92,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
union acpi_operand_object *local_obj_desc = NULL;
- ACPI_FUNCTION_TRACE_PTR("ds_get_predicate_value", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_predicate_value, walk_state);
walk_state->control_state->common.state = 0;
@@ -123,7 +122,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (!obj_desc) {
ACPI_ERROR((AE_INFO,
- "No predicate obj_desc=%p State=%p",
+ "No predicate ObjDesc=%p State=%p",
obj_desc, walk_state));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
@@ -140,7 +139,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "Bad predicate (not an integer) obj_desc=%p State=%p Type=%X",
+ "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
@@ -214,7 +213,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
u32 opcode_class;
- ACPI_FUNCTION_TRACE_PTR("ds_exec_begin_op", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_exec_begin_op, walk_state);
op = walk_state->op;
if (!op) {
@@ -296,7 +295,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
case AML_CLASS_NAMED_OBJECT:
- if (walk_state->walk_type == ACPI_WALK_METHOD) {
+ if (walk_state->walk_type & ACPI_WALK_METHOD) {
/*
* Found a named object declaration during method execution;
* we must enter this object into the namespace. The created
@@ -354,7 +353,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
union acpi_parse_object *next_op;
union acpi_parse_object *first_arg;
- ACPI_FUNCTION_TRACE_PTR("ds_exec_end_op", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
op = walk_state->op;
op_type = walk_state->op_info->type;
@@ -409,6 +408,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
* being the object_type and size_of operators.
*/
if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) {
+
/* Resolve all operands */
status = acpi_ex_resolve_operands(walk_state->opcode,
@@ -423,7 +423,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
acpi_ps_get_opcode_name
(walk_state->opcode),
walk_state->num_operands,
- "after ex_resolve_operands");
+ "after ExResolveOperands");
}
}
@@ -437,7 +437,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
acpi_gbl_op_type_dispatch[op_type] (walk_state);
} else {
/*
- * Treat constructs of the form "Store(local_x,local_x)" as noops when the
+ * Treat constructs of the form "Store(LocalX,LocalX)" as noops when the
* Local is uninitialized.
*/
if ((status == AE_AML_UNINITIALIZED_LOCAL) &&
@@ -548,6 +548,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
*/
status = acpi_ds_resolve_operands(walk_state);
if (ACPI_FAILURE(status)) {
+
/* On error, clear all resolved operands */
acpi_ds_clear_operands(walk_state);
@@ -569,7 +570,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_CREATE_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Executing create_field Buffer/Index Op=%p\n",
+ "Executing CreateField Buffer/Index Op=%p\n",
op));
status = acpi_ds_load2_end_op(walk_state);
@@ -584,7 +585,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_CREATE_OBJECT:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Executing create_object (Buffer/Package) Op=%p\n",
+ "Executing CreateObject (Buffer/Package) Op=%p\n",
op));
switch (op->common.parent->common.aml_opcode) {
@@ -657,7 +658,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
if (op->common.aml_opcode == AML_REGION_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Executing op_region Address/Length Op=%p\n",
+ "Executing OpRegion Address/Length Op=%p\n",
op));
status =
@@ -722,6 +723,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
cleanup:
if (walk_state->result_obj) {
+
/* Break to debugger to display result */
ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
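
One substantive change in the dswexec.c hunks above is the named-object test in acpi_ds_exec_begin_op(): walk_type is now tested as a bit mask rather than compared for equality, so the branch is also taken when other walk flags are set alongside ACPI_WALK_METHOD. A small sketch of the difference (flag values invented for the example; the real definitions live elsewhere in the ACPICA headers):

    #define EXAMPLE_WALK_METHOD     0x01    /* stand-in for ACPI_WALK_METHOD */
    #define EXAMPLE_WALK_OTHER_FLAG 0x02    /* some additional walk flag     */

    static int is_method_walk(unsigned int walk_type)
    {
            /* Old test: equality, false once any extra flag is present */
            /* return walk_type == EXAMPLE_WALK_METHOD; */

            /* New test: bit-mask check, true whenever the METHOD bit is set */
            return (walk_type & EXAMPLE_WALK_METHOD) != 0;
    }

    /* is_method_walk(EXAMPLE_WALK_METHOD | EXAMPLE_WALK_OTHER_FLAG) returns 1 */
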
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index d3d24da31c81..35074399c617 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
char *path;
u32 flags;
- ACPI_FUNCTION_TRACE("ds_load1_begin_op");
+ ACPI_FUNCTION_TRACE(ds_load1_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -178,12 +178,12 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* Target of Scope() not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(path);
+ acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
status =
acpi_ns_lookup(walk_state->scope_info, path,
object_type, ACPI_IMODE_LOAD_PASS1,
ACPI_NS_SEARCH_PARENT, walk_state,
- &(node));
+ &node);
}
#endif
if (ACPI_FAILURE(status)) {
@@ -261,6 +261,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
*/
if (walk_state->deferred_node) {
+
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
@@ -300,10 +301,41 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
status =
acpi_ns_lookup(walk_state->scope_info, path, object_type,
ACPI_IMODE_LOAD_PASS1, flags, walk_state,
- &(node));
+ &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
- return_ACPI_STATUS(status);
+ if (status == AE_ALREADY_EXISTS) {
+
+ /* The name already exists in this scope */
+
+ if (node->flags & ANOBJ_IS_EXTERNAL) {
+ /*
+ * Allow one create on an object or segment that was
+ * previously declared External
+ */
+ node->flags &= ~ANOBJ_IS_EXTERNAL;
+ node->type = (u8) object_type;
+
+ /* Just retyped a node, probably will need to open a scope */
+
+ if (acpi_ns_opens_scope(object_type)) {
+ status =
+ acpi_ds_scope_stack_push
+ (node, object_type,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS
+ (status);
+ }
+ }
+ status = AE_OK;
+ }
+ }
+
+ if (ACPI_FAILURE(status)) {
+
+ ACPI_ERROR_NAMESPACE(path, status);
+ return_ACPI_STATUS(status);
+ }
}
break;
}
@@ -311,6 +343,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* Common exit */
if (!op) {
+
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
@@ -359,7 +392,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
acpi_object_type object_type;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ds_load1_end_op");
+ ACPI_FUNCTION_TRACE(ds_load1_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -413,6 +446,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
#endif
if (op->common.aml_opcode == AML_NAME_OP) {
+
/* For Name opcode, get the object type from the argument */
if (op->common.value.arg) {
@@ -445,7 +479,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "LOADING-Method: State=%p Op=%p named_obj=%p\n",
+ "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
@@ -511,7 +545,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
acpi_object_type object_type;
char *buffer_ptr;
- ACPI_FUNCTION_TRACE("ds_load2_begin_op");
+ ACPI_FUNCTION_TRACE(ds_load2_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -521,6 +555,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if ((walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+
/* We are executing a while loop outside of a method */
status = acpi_ds_exec_begin_op(walk_state, out_op);
@@ -554,10 +589,12 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* Get the name we are going to enter or lookup in the namespace */
if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+
/* For Namepath op, get the path string */
buffer_ptr = op->common.value.string;
if (!buffer_ptr) {
+
/* No name, just exit */
return_ACPI_STATUS(AE_OK);
@@ -680,6 +717,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* All other opcodes */
if (op && op->common.node) {
+
/* This op/node was previously entered into the namespace */
node = op->common.node;
@@ -705,6 +743,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
* Note: Name may already exist if we are executing a deferred opcode.
*/
if (walk_state->deferred_node) {
+
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
@@ -727,6 +766,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (!op) {
+
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
@@ -776,7 +816,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
u32 i;
#endif
- ACPI_FUNCTION_TRACE("ds_load2_end_op");
+ ACPI_FUNCTION_TRACE(ds_load2_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
@@ -870,7 +910,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Create-Load [%s] State=%p Op=%p named_obj=%p\n",
+ "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state, op, node));
@@ -1045,7 +1085,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "LOADING-Method: State=%p Op=%p named_obj=%p\n",
+ "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
@@ -1090,7 +1130,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_CLASS_METHOD_CALL:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "RESOLVING-method_call: State=%p Op=%p named_obj=%p\n",
+ "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
walk_state, op, node));
/*
@@ -1104,7 +1144,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
ACPI_NS_DONT_OPEN_SCOPE, walk_state,
&(new_node));
if (ACPI_SUCCESS(status)) {
-
/*
* Make sure that what we found is indeed a method
* We didn't search for a method on purpose, to see if the name
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
index ada21ef4a174..c9228972f5f6 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -63,9 +63,10 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
{
union acpi_generic_state *scope_info;
- ACPI_FUNCTION_NAME("ds_scope_stack_clear");
+ ACPI_FUNCTION_NAME(ds_scope_stack_clear);
while (walk_state->scope_info) {
+
/* Pop a scope off the stack */
scope_info = walk_state->scope_info;
@@ -102,9 +103,10 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
union acpi_generic_state *scope_info;
union acpi_generic_state *old_scope_info;
- ACPI_FUNCTION_TRACE("ds_scope_stack_push");
+ ACPI_FUNCTION_TRACE(ds_scope_stack_push);
if (!node) {
+
/* Invalid scope */
ACPI_ERROR((AE_INFO, "Null scope parameter"));
@@ -126,7 +128,7 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
/* Init new scope object */
- scope_info->common.data_type = ACPI_DESC_TYPE_STATE_WSCOPE;
+ scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE;
scope_info->scope.node = node;
scope_info->common.value = (u16) type;
@@ -176,7 +178,7 @@ acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state)
union acpi_generic_state *scope_info;
union acpi_generic_state *new_scope_info;
- ACPI_FUNCTION_TRACE("ds_scope_stack_pop");
+ ACPI_FUNCTION_TRACE(ds_scope_stack_pop);
/*
* Pop scope info object off the stack.
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index fa78cb74ee36..7817e5522679 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -66,7 +66,6 @@ void *acpi_ds_obj_stack_get_value(u32 index,
#endif
#ifdef ACPI_FUTURE_USAGE
-
/*******************************************************************************
*
* FUNCTION: acpi_ds_result_remove
@@ -88,7 +87,7 @@ acpi_ds_result_remove(union acpi_operand_object **object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_remove");
+ ACPI_FUNCTION_NAME(ds_result_remove);
state = walk_state->results;
if (!state) {
@@ -128,7 +127,6 @@ acpi_ds_result_remove(union acpi_operand_object **object,
return (AE_OK);
}
-
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -152,7 +150,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
acpi_native_uint index;
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_pop");
+ ACPI_FUNCTION_NAME(ds_result_pop);
state = walk_state->results;
if (!state) {
@@ -170,6 +168,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
state->results.num_results--;
for (index = ACPI_OBJ_NUM_OPERANDS; index; index--) {
+
/* Check for a valid result object */
if (state->results.obj_desc[index - 1]) {
@@ -213,7 +212,7 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
acpi_native_uint index;
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_pop_from_bottom");
+ ACPI_FUNCTION_NAME(ds_result_pop_from_bottom);
state = walk_state->results;
if (!state) {
@@ -278,7 +277,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_push");
+ ACPI_FUNCTION_NAME(ds_result_push);
state = walk_state->results;
if (!state) {
@@ -331,14 +330,14 @@ acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_stack_push");
+ ACPI_FUNCTION_NAME(ds_result_stack_push);
state = acpi_ut_create_generic_state();
if (!state) {
return (AE_NO_MEMORY);
}
- state->common.data_type = ACPI_DESC_TYPE_STATE_RESULT;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
acpi_ut_push_generic_state(&walk_state->results, state);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
@@ -363,7 +362,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_stack_pop");
+ ACPI_FUNCTION_NAME(ds_result_stack_pop);
/* Check for stack underflow */
@@ -376,7 +375,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
state = acpi_ut_pop_generic_state(&walk_state->results);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Result=%p remaining_results=%X State=%p\n",
+ "Result=%p RemainingResults=%X State=%p\n",
state, state->results.num_results, walk_state));
acpi_ut_delete_generic_state(state);
@@ -400,7 +399,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
acpi_status
acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
{
- ACPI_FUNCTION_NAME("ds_obj_stack_push");
+ ACPI_FUNCTION_NAME(ds_obj_stack_push);
/* Check for stack overflow */
@@ -445,9 +444,10 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
{
u32 i;
- ACPI_FUNCTION_NAME("ds_obj_stack_pop");
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop);
for (i = 0; i < pop_count; i++) {
+
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
@@ -491,9 +491,10 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
u32 i;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_NAME("ds_obj_stack_pop_and_delete");
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
for (i = 0; i < pop_count; i++) {
+
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
@@ -538,13 +539,13 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
*thread)
{
- ACPI_FUNCTION_NAME("ds_get_current_walk_state");
+ ACPI_FUNCTION_NAME(ds_get_current_walk_state);
if (!thread) {
return (NULL);
}
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current walk_state %p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current WalkState %p\n",
thread->walk_state_list));
return (thread->walk_state_list);
@@ -567,7 +568,7 @@ void
acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
struct acpi_thread_state *thread)
{
- ACPI_FUNCTION_TRACE("ds_push_walk_state");
+ ACPI_FUNCTION_TRACE(ds_push_walk_state);
walk_state->next = thread->walk_state_list;
thread->walk_state_list = walk_state;
@@ -593,11 +594,12 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
{
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ds_pop_walk_state");
+ ACPI_FUNCTION_TRACE(ds_pop_walk_state);
walk_state = thread->walk_state_list;
if (walk_state) {
+
/* Next walk state becomes the current walk state */
thread->walk_state_list = walk_state->next;
@@ -618,7 +620,7 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
*
* PARAMETERS: owner_id - ID for object creation
* Origin - Starting point for this walk
- * mth_desc - Method object
+ * method_desc - Method object
* Thread - Current thread state
*
* RETURN: Pointer to the new walk state.
@@ -632,24 +634,24 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
union acpi_parse_object
*origin,
union acpi_operand_object
- *mth_desc,
+ *method_desc,
struct acpi_thread_state
*thread)
{
struct acpi_walk_state *walk_state;
acpi_status status;
- ACPI_FUNCTION_TRACE("ds_create_walk_state");
+ ACPI_FUNCTION_TRACE(ds_create_walk_state);
- walk_state = ACPI_MEM_CALLOCATE(sizeof(struct acpi_walk_state));
+ walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
if (!walk_state) {
return_PTR(NULL);
}
- walk_state->data_type = ACPI_DESC_TYPE_WALK;
+ walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;
+ walk_state->method_desc = method_desc;
walk_state->owner_id = owner_id;
walk_state->origin = origin;
- walk_state->method_desc = mth_desc;
walk_state->thread = thread;
walk_state->parser_state.start_op = origin;
@@ -664,7 +666,7 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
status = acpi_ds_result_stack_push(walk_state);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(walk_state);
+ ACPI_FREE(walk_state);
return_PTR(NULL);
}
@@ -701,13 +703,13 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *method_node,
u8 * aml_start,
u32 aml_length,
- struct acpi_parameter_info *info, u8 pass_number)
+ struct acpi_evaluate_info *info, u8 pass_number)
{
acpi_status status;
struct acpi_parse_state *parser_state = &walk_state->parser_state;
union acpi_parse_object *extra_op;
- ACPI_FUNCTION_TRACE("ds_init_aml_walk");
+ ACPI_FUNCTION_TRACE(ds_init_aml_walk);
walk_state->parser_state.aml =
walk_state->parser_state.aml_start = aml_start;
@@ -778,6 +780,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
}
if (parser_state->start_node) {
+
/* Push start scope on scope stack and make it current */
status =
@@ -810,21 +813,24 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR("ds_delete_walk_state", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
if (!walk_state) {
return;
}
- if (walk_state->data_type != ACPI_DESC_TYPE_WALK) {
+ if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
walk_state));
return;
}
+ /* There should not be any open scopes */
+
if (walk_state->parser_state.scope) {
ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
walk_state));
+ acpi_ps_cleanup_scope(&walk_state->parser_state);
}
/* Always must free any linked control states */
@@ -854,7 +860,7 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
acpi_ut_delete_generic_state(state);
}
- ACPI_MEM_FREE(walk_state);
+ ACPI_FREE(walk_state);
return_VOID;
}
@@ -879,7 +885,7 @@ acpi_ds_result_insert(void *object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_insert");
+ ACPI_FUNCTION_NAME(ds_result_insert);
state = walk_state->results;
if (!state) {
@@ -937,7 +943,7 @@ acpi_status acpi_ds_obj_stack_delete_all(struct acpi_walk_state * walk_state)
{
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ds_obj_stack_delete_all", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_delete_all, walk_state);
/* The stack size is configurable, but fixed */
@@ -969,7 +975,7 @@ acpi_status
acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
struct acpi_walk_state *walk_state)
{
- ACPI_FUNCTION_NAME("ds_obj_stack_pop_object");
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop_object);
/* Check for stack underflow */
@@ -1025,7 +1031,7 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
void *acpi_ds_obj_stack_get_value(u32 index, struct acpi_walk_state *walk_state)
{
- ACPI_FUNCTION_TRACE_PTR("ds_obj_stack_get_value", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_get_value, walk_state);
/* Can't do it if the stack is empty */
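
The dswstate.c hunks above show the whole life cycle of a walk state under the renamed fields and allocation wrappers: allocate zeroed, stamp descriptor_type, validate it before teardown, free with ACPI_FREE(). A condensed sketch of that pattern, assuming the ACPICA headers that define these macros and types:

    static struct acpi_walk_state *example_create_walk_state(void)
    {
            struct acpi_walk_state *ws;

            ws = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
            if (!ws)
                    return NULL;

            ws->descriptor_type = ACPI_DESC_TYPE_WALK;   /* was ->data_type */
            return ws;
    }

    static void example_delete_walk_state(struct acpi_walk_state *ws)
    {
            if (ws && ws->descriptor_type == ACPI_DESC_TYPE_WALK)
                    ACPI_FREE(ws);                       /* was ACPI_MEM_FREE() */
    }
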
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index eee0864ba300..18b3ea9dace2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -116,7 +116,7 @@ union acpi_ec {
struct acpi_generic_address command_addr;
struct acpi_generic_address data_addr;
unsigned long global_lock;
- spinlock_t lock;
+ struct semaphore sem;
} poll;
};
@@ -323,7 +323,6 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
{
acpi_status status = AE_OK;
int result = 0;
- unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_read");
@@ -339,8 +338,11 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
return_VALUE(-ENODEV);
}
- spin_lock_irqsave(&ec->poll.lock, flags);
-
+ if (down_interruptible(&ec->poll.sem)) {
+ result = -ERESTARTSYS;
+ goto end_nosem;
+ }
+
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ,
&ec->common.command_addr);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
@@ -358,8 +360,8 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
*data, address));
end:
- spin_unlock_irqrestore(&ec->poll.lock, flags);
-
+ up(&ec->poll.sem);
+end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@@ -370,7 +372,6 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
{
int result = 0;
acpi_status status = AE_OK;
- unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_write");
@@ -384,8 +385,11 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
return_VALUE(-ENODEV);
}
- spin_lock_irqsave(&ec->poll.lock, flags);
-
+ if (down_interruptible(&ec->poll.sem)) {
+ result = -ERESTARTSYS;
+ goto end_nosem;
+ }
+
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE,
&ec->common.command_addr);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
@@ -406,8 +410,8 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
data, address));
end:
- spin_unlock_irqrestore(&ec->poll.lock, flags);
-
+ up(&ec->poll.sem);
+end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@@ -568,7 +572,6 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
{
int result = 0;
acpi_status status = AE_OK;
- unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_query");
@@ -589,8 +592,11 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
- spin_lock_irqsave(&ec->poll.lock, flags);
-
+ if (down_interruptible(&ec->poll.sem)) {
+ result = -ERESTARTSYS;
+ goto end_nosem;
+ }
+
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY,
&ec->common.command_addr);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
@@ -602,8 +608,8 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
result = -ENODATA;
end:
- spin_unlock_irqrestore(&ec->poll.lock, flags);
-
+ up(&ec->poll.sem);
+end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@@ -680,7 +686,6 @@ static void acpi_ec_gpe_poll_query(void *ec_cxt)
{
union acpi_ec *ec = (union acpi_ec *)ec_cxt;
u32 value = 0;
- unsigned long flags = 0;
static char object_name[5] = { '_', 'Q', '0', '0', '\0' };
const char hex[] = { '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
@@ -691,9 +696,11 @@ static void acpi_ec_gpe_poll_query(void *ec_cxt)
if (!ec_cxt)
goto end;
- spin_lock_irqsave(&ec->poll.lock, flags);
+ if (down_interruptible (&ec->poll.sem)) {
+ return_VOID;
+ }
acpi_hw_low_level_read(8, &value, &ec->common.command_addr);
- spin_unlock_irqrestore(&ec->poll.lock, flags);
+ up(&ec->poll.sem);
/* TBD: Implement asynch events!
* NOTE: All we care about are EC-SCI's. Other EC events are
@@ -763,8 +770,7 @@ static u32 acpi_ec_gpe_poll_handler(void *data)
acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR);
- status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
- acpi_ec_gpe_query, ec);
+ status = acpi_os_execute(OSL_EC_POLL_HANDLER, acpi_ec_gpe_query, ec);
if (status == AE_OK)
return ACPI_INTERRUPT_HANDLED;
@@ -799,7 +805,7 @@ static u32 acpi_ec_gpe_intr_handler(void *data)
if (value & ACPI_EC_FLAG_SCI) {
atomic_add(1, &ec->intr.pending_gpe);
- status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
+ status = acpi_os_execute(OSL_EC_BURST_HANDLER,
acpi_ec_gpe_query, ec);
return status == AE_OK ?
ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
@@ -991,7 +997,6 @@ static int acpi_ec_poll_add(struct acpi_device *device)
int result = 0;
acpi_status status = AE_OK;
union acpi_ec *ec = NULL;
- unsigned long uid;
ACPI_FUNCTION_TRACE("acpi_ec_add");
@@ -1005,7 +1010,7 @@ static int acpi_ec_poll_add(struct acpi_device *device)
ec->common.handle = device->handle;
ec->common.uid = -1;
- spin_lock_init(&ec->poll.lock);
+ init_MUTEX(&ec->poll.sem);
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
acpi_driver_data(device) = ec;
@@ -1014,10 +1019,9 @@ static int acpi_ec_poll_add(struct acpi_device *device)
acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
&ec->common.global_lock);
- /* If our UID matches the UID for the ECDT-enumerated EC,
- we now have the *real* EC info, so kill the makeshift one. */
- acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid);
- if (ec_ecdt && ec_ecdt->common.uid == uid) {
+ /* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
+ http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
+ if (ec_ecdt) {
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler);
@@ -1062,7 +1066,6 @@ static int acpi_ec_intr_add(struct acpi_device *device)
int result = 0;
acpi_status status = AE_OK;
union acpi_ec *ec = NULL;
- unsigned long uid;
ACPI_FUNCTION_TRACE("acpi_ec_add");
@@ -1088,10 +1091,9 @@ static int acpi_ec_intr_add(struct acpi_device *device)
acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
&ec->common.global_lock);
- /* If our UID matches the UID for the ECDT-enumerated EC,
- we now have the *real* EC info, so kill the makeshift one. */
- acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid);
- if (ec_ecdt && ec_ecdt->common.uid == uid) {
+ /* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
+ http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
+ if (ec_ecdt) {
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler);
@@ -1300,7 +1302,7 @@ acpi_fake_ecdt_poll_callback(acpi_handle handle,
&ec_ecdt->common.gpe_bit);
if (ACPI_FAILURE(status))
return status;
- spin_lock_init(&ec_ecdt->poll.lock);
+ init_MUTEX(&ec_ecdt->poll.sem);
ec_ecdt->common.global_lock = TRUE;
ec_ecdt->common.handle = handle;
@@ -1416,7 +1418,7 @@ static int __init acpi_ec_poll_get_real_ecdt(void)
ec_ecdt->common.status_addr = ecdt_ptr->ec_control;
ec_ecdt->common.data_addr = ecdt_ptr->ec_data;
ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit;
- spin_lock_init(&ec_ecdt->poll.lock);
+ init_MUTEX(&ec_ecdt->poll.sem);
/* use the GL just to be safe */
ec_ecdt->common.global_lock = TRUE;
ec_ecdt->common.uid = ecdt_ptr->uid;
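
The ec.c changes above swap the polling-mode spinlock for a semaphore so EC transactions may sleep; a failed down_interruptible() now bails out with -ERESTARTSYS before any hardware access, and the semaphore is set up with init_MUTEX() at probe time. A stripped-down sketch of the new locking shape (the helper names are invented for illustration; it assumes the 2.6-era <asm/semaphore.h>):

    #include <asm/semaphore.h>

    static struct semaphore ec_poll_sem;

    static void example_ec_init(void)
    {
            init_MUTEX(&ec_poll_sem);       /* counting semaphore initialized to 1 */
    }

    static int example_ec_transaction(void)
    {
            if (down_interruptible(&ec_poll_sem))
                    return -ERESTARTSYS;    /* interrupted before touching hardware */

            /* issue EC command, wait for IBE/OBF, read or write the data byte */

            up(&ec_poll_sem);
            return 0;
    }
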
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index c9ac05c4685f..919037d6acff 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -68,7 +68,7 @@ acpi_status acpi_ev_initialize_events(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_initialize_events");
+ ACPI_FUNCTION_TRACE(ev_initialize_events);
/* Make sure we have ACPI tables */
@@ -118,7 +118,7 @@ acpi_status acpi_ev_install_fadt_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_install_fadt_gpes");
+ ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
/* Namespace must be locked */
@@ -157,7 +157,7 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_install_xrupt_handlers");
+ ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
/* Install the SCI handler */
@@ -241,7 +241,7 @@ u32 acpi_ev_fixed_event_detect(void)
u32 fixed_enable;
acpi_native_uint i;
- ACPI_FUNCTION_NAME("ev_fixed_event_detect");
+ ACPI_FUNCTION_NAME(ev_fixed_event_detect);
/*
* Read the fixed feature status and enable registers, as all the cases
@@ -260,12 +260,14 @@ u32 acpi_ev_fixed_event_detect(void)
* Check for all possible Fixed Events and dispatch those that are active
*/
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+
/* Both the status and enable bits must be on for this event */
if ((fixed_status & acpi_gbl_fixed_event_info[i].
status_bit_mask)
&& (fixed_enable & acpi_gbl_fixed_event_info[i].
enable_bit_mask)) {
+
/* Found an active (signalled) event */
int_status |= acpi_ev_fixed_event_dispatch((u32) i);
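
The evevent.c hunk above only adds blank comment separators; the dispatch rule itself is unchanged: a fixed event fires only when its status bit and its enable bit are both set in the respective registers. Reduced to the bare mask test (field names are stand-ins for the acpi_gbl_fixed_event_info entries):

    struct example_fixed_event {
            unsigned int status_bit_mask;
            unsigned int enable_bit_mask;
    };

    static int example_event_is_active(unsigned int fixed_status,
                                       unsigned int fixed_enable,
                                       const struct example_fixed_event *ev)
    {
            return (fixed_status & ev->status_bit_mask) &&
                   (fixed_enable & ev->enable_bit_mask);
    }
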
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index f64f977dd3d5..f01d339407f8 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -69,7 +69,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_set_gpe_type");
+ ACPI_FUNCTION_TRACE(ev_set_gpe_type);
/* Validate type and update register enable masks */
@@ -115,7 +115,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info;
u8 register_bit;
- ACPI_FUNCTION_TRACE("ev_update_gpe_enable_masks");
+ ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
@@ -178,7 +178,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_enable_gpe");
+ ACPI_FUNCTION_TRACE(ev_enable_gpe);
/* Make sure HW enable masks are updated */
@@ -207,6 +207,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
if (write_to_hardware) {
+
/* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
@@ -243,7 +244,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_disable_gpe");
+ ACPI_FUNCTION_TRACE(ev_disable_gpe);
if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
return_ACPI_STATUS(AE_OK);
@@ -313,6 +314,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
/* A NULL gpe_block means use the FADT-defined GPE block(s) */
if (!gpe_device) {
+
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
@@ -380,10 +382,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
u32 status_reg;
u32 enable_reg;
acpi_cpu_flags flags;
+ acpi_cpu_flags hw_flags;
acpi_native_uint i;
acpi_native_uint j;
- ACPI_FUNCTION_NAME("ev_gpe_detect");
+ ACPI_FUNCTION_NAME(ev_gpe_detect);
/* Check for the case where there are no GPEs */
@@ -391,9 +394,12 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
return (int_status);
}
- /* Examine all GPE blocks attached to this interrupt level */
+ /* We need to hold the GPE lock now, hardware lock in the loop */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Examine all GPE blocks attached to this interrupt level */
+
gpe_block = gpe_xrupt_list->gpe_block_list_head;
while (gpe_block) {
/*
@@ -402,10 +408,13 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
* Find all currently active GP events.
*/
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Get the next status/enable pair */
gpe_register_info = &gpe_block->register_info[i];
+ hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
/* Read the Status Register */
status =
@@ -414,6 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&gpe_register_info->
status_address);
if (ACPI_FAILURE(status)) {
+ acpi_os_release_lock(acpi_gbl_hardware_lock,
+ hw_flags);
goto unlock_and_exit;
}
@@ -424,6 +435,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&enable_reg,
&gpe_register_info->
enable_address);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
+
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
@@ -437,6 +450,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
enabled_status_byte = (u8) (status_reg & enable_reg);
if (!enabled_status_byte) {
+
/* No active GPEs in this register, move on */
continue;
@@ -445,6 +459,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+
/* Examine one GPE bit */
if (enabled_status_byte &
@@ -483,9 +498,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
*
* RETURN: None
*
- * DESCRIPTION: Perform the actual execution of a GPE control method. This
- * function is called from an invocation of acpi_os_queue_for_execution
- * (and therefore does NOT execute at interrupt level) so that
+ * DESCRIPTION: Perform the actual execution of a GPE control method. This
+ * function is called from an invocation of acpi_os_execute and
+ * therefore does NOT execute at interrupt level - so that
* the control method itself is not executed in the context of
* an interrupt handler.
*
@@ -494,12 +509,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = (void *)context;
- u32 gpe_number = 0;
acpi_status status;
struct acpi_gpe_event_info local_gpe_event_info;
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
- ACPI_FUNCTION_TRACE("ev_asynch_execute_gpe_method");
+ ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -535,22 +549,35 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
*/
if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD) {
- /*
- * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
- * control method that corresponds to this GPE
- */
- info.node = local_gpe_event_info.dispatch.method_node;
- info.parameters =
- ACPI_CAST_PTR(union acpi_operand_object *, gpe_event_info);
- info.parameter_type = ACPI_PARAM_GPE;
- status = acpi_ns_evaluate_by_handle(&info);
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ status = AE_NO_MEMORY;
+ } else {
+ /*
+ * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
+ * control method that corresponds to this GPE
+ */
+ info->prefix_node =
+ local_gpe_event_info.dispatch.method_node;
+ info->parameters =
+ ACPI_CAST_PTR(union acpi_operand_object *,
+ gpe_event_info);
+ info->parameter_type = ACPI_PARAM_GPE;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ ACPI_FREE(info);
+ }
+
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "While evaluating method [%4.4s] for GPE[%2X]",
+ "While evaluating GPE method [%4.4s]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
- method_node), gpe_number));
+ method_node)));
}
}
@@ -593,7 +620,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_gpe_dispatch");
+ ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
/*
* If edge-triggered, clear the GPE status bit now. Note that
@@ -669,9 +696,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
* Execute the method associated with the GPE
* NOTE: Level-triggered GPEs are cleared after the method completes.
*/
- status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
- acpi_ev_asynch_execute_gpe_method,
- gpe_event_info);
+ status = acpi_os_execute(OSL_GPE_HANDLER,
+ acpi_ev_asynch_execute_gpe_method,
+ gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE[%2X] - event disabled",
@@ -716,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*
* DESCRIPTION: Determine if a GPE is "wake-only".
*
- * Called from Notify() code in interpreter when a "device_wake"
+ * Called from Notify() code in interpreter when a "DeviceWake"
* Notify comes in.
*
******************************************************************************/
@@ -726,7 +753,7 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_check_for_wake_only_gpe");
+ ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
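
The evgpe.c hunks replace the on-stack acpi_parameter_info with a heap-allocated acpi_evaluate_info and queue the deferred work through acpi_os_execute() instead of acpi_os_queue_for_execution(). Condensed from the patch itself (declarations added, error paths trimmed), the new call sequence looks like this:

    acpi_status status;
    struct acpi_evaluate_info *info;

    /* From interrupt level: defer the GPE method execution */
    status = acpi_os_execute(OSL_GPE_HANDLER,
                             acpi_ev_asynch_execute_gpe_method, gpe_event_info);

    /* In the deferred handler: build the evaluation info block */
    info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
    if (!info)
            return;                             /* AE_NO_MEMORY in the real code */

    info->prefix_node = local_gpe_event_info.dispatch.method_node;
    info->parameters = ACPI_CAST_PTR(union acpi_operand_object *, gpe_event_info);
    info->parameter_type = ACPI_PARAM_GPE;
    info->flags = ACPI_IGNORE_RETURN_VALUE;     /* _Lxx/_Exx results are discarded */

    status = acpi_ns_evaluate(info);            /* was acpi_ns_evaluate_by_handle() */
    ACPI_FREE(info);
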
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 0fd00b5ad650..95ddeb48bc0f 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -131,14 +131,14 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
*
******************************************************************************/
-acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
+acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_walk_gpe_list");
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -146,10 +146,12 @@ acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
while (gpe_xrupt_info) {
+
/* Walk all Gpe Blocks attached to this interrupt level */
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
+
/* One callback per GPE block */
status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
@@ -190,11 +192,12 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_native_uint i;
acpi_native_uint j;
- ACPI_FUNCTION_TRACE("ev_delete_gpe_handlers");
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
@@ -204,7 +207,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
- ACPI_MEM_FREE(gpe_event_info->dispatch.handler);
+ ACPI_FREE(gpe_event_info->dispatch.handler);
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
@@ -248,7 +251,7 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
u8 type;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_save_method_info");
+ ACPI_FUNCTION_TRACE(ev_save_method_info);
/*
* _Lxx and _Exx GPE method support
@@ -279,9 +282,9 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
default:
/* Unknown method type, just ignore it! */
- ACPI_ERROR((AE_INFO,
- "Unknown GPE method type: %s (name not of form _Lxx or _Exx)",
- name));
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
+ name));
return_ACPI_STATUS(AE_OK);
}
@@ -289,11 +292,12 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
if (gpe_number == ACPI_UINT32_MAX) {
+
/* Conversion failed; invalid method, just ignore it */
- ACPI_ERROR((AE_INFO,
- "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
- name));
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
+ name));
return_ACPI_STATUS(AE_OK);
}
@@ -364,13 +368,14 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
u32 gpe_number;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_match_prw_and_gpe");
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
/* Check for a _PRW method under this device */
status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
ACPI_BTYPE_PACKAGE, &pkg_desc);
if (ACPI_FAILURE(status)) {
+
/* Ignore all errors from _PRW, we don't want to abort the subsystem */
return_ACPI_STATUS(AE_OK);
@@ -394,6 +399,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
obj_desc = pkg_desc->package.elements[0];
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Use FADT-defined GPE device (from definition of _PRW) */
target_gpe_device = acpi_gbl_fadt_gpe_device;
@@ -402,6 +408,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
gpe_number = (u32) obj_desc->integer.value;
} else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
/* Package contains a GPE reference and GPE number within a GPE block */
if ((obj_desc->package.count < 2) ||
@@ -482,7 +489,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_get_gpe_xrupt_block");
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
/* No need for lock since we are not changing any list elements here */
@@ -497,7 +504,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
/* Not found, must allocate a new xrupt descriptor */
- gpe_xrupt = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_xrupt_info));
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
if (!gpe_xrupt) {
return_PTR(NULL);
}
@@ -556,7 +563,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_delete_gpe_xrupt");
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
/* We never want to remove the SCI interrupt handler */
@@ -588,7 +595,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
/* Free the block */
- ACPI_MEM_FREE(gpe_xrupt);
+ ACPI_FREE(gpe_xrupt);
return_ACPI_STATUS(AE_OK);
}
@@ -614,7 +621,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_install_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_install_gpe_block);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -667,7 +674,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_install_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_install_gpe_block);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -679,6 +686,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
if (!gpe_block->previous && !gpe_block->next) {
+
/* This is the last gpe_block on this interrupt */
status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
@@ -704,9 +712,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
/* Free the gpe_block */
- ACPI_MEM_FREE(gpe_block->register_info);
- ACPI_MEM_FREE(gpe_block->event_info);
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block->register_info);
+ ACPI_FREE(gpe_block->event_info);
+ ACPI_FREE(gpe_block);
unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -736,17 +744,17 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
acpi_native_uint j;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_create_gpe_info_blocks");
+ ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
/* Allocate the GPE register information block */
- gpe_register_info = ACPI_MEM_CALLOCATE((acpi_size) gpe_block->
- register_count *
- sizeof(struct
- acpi_gpe_register_info));
+ gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+ register_count *
+ sizeof(struct
+ acpi_gpe_register_info));
if (!gpe_register_info) {
ACPI_ERROR((AE_INFO,
- "Could not allocate the gpe_register_info table"));
+ "Could not allocate the GpeRegisterInfo table"));
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -754,13 +762,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_MEM_CALLOCATE(((acpi_size) gpe_block->
- register_count *
- ACPI_GPE_REGISTER_WIDTH) *
- sizeof(struct acpi_gpe_event_info));
+ gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
+ register_count *
+ ACPI_GPE_REGISTER_WIDTH) *
+ sizeof(struct
+ acpi_gpe_event_info));
if (!gpe_event_info) {
ACPI_ERROR((AE_INFO,
- "Could not allocate the gpe_event_info table"));
+ "Could not allocate the GpeEventInfo table"));
status = AE_NO_MEMORY;
goto error_exit;
}
@@ -780,6 +789,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
this_event = gpe_event_info;
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Init the register_info for this GPE register (8 GPEs) */
this_register->base_gpe_number =
@@ -839,10 +849,10 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
error_exit:
if (gpe_register_info) {
- ACPI_MEM_FREE(gpe_register_info);
+ ACPI_FREE(gpe_register_info);
}
if (gpe_event_info) {
- ACPI_MEM_FREE(gpe_event_info);
+ ACPI_FREE(gpe_event_info);
}
return_ACPI_STATUS(status);
@@ -878,7 +888,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
- ACPI_FUNCTION_TRACE("ev_create_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_create_gpe_block);
if (!register_count) {
return_ACPI_STATUS(AE_OK);
@@ -886,7 +896,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Allocate a new GPE block */
- gpe_block = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_block_info));
+ gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
if (!gpe_block) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -906,7 +916,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
*/
status = acpi_ev_create_gpe_info_blocks(gpe_block);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block);
return_ACPI_STATUS(status);
}
@@ -914,7 +924,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block);
return_ACPI_STATUS(status);
}
@@ -971,7 +981,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_native_uint i;
acpi_native_uint j;
- ACPI_FUNCTION_TRACE("ev_initialize_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
@@ -1013,6 +1023,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < 8; j++) {
+
/* Get the info block for this particular GPE */
gpe_event_info =
@@ -1040,7 +1051,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not enable GPEs in gpe_block %p",
+ ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
gpe_block));
}
@@ -1066,7 +1077,7 @@ acpi_status acpi_ev_gpe_initialize(void)
u32 gpe_number_max = 0;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_gpe_initialize");
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -1099,6 +1110,7 @@ acpi_status acpi_ev_gpe_initialize(void)
* particular block is not supported.
*/
if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) {
+
/* GPE block 0 exists (has both length and address > 0) */
register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);
@@ -1121,6 +1133,7 @@ acpi_status acpi_ev_gpe_initialize(void)
}
if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) {
+
/* GPE block 1 exists (has both length and address > 0) */
register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);
@@ -1168,6 +1181,7 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Exit if there are no GPE registers */
if ((register_count0 + register_count1) == 0) {
+
/* GPEs are not required by ACPI, this is OK */
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
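
acpi_ev_save_method_info() in the evgpeblk.c hunks now just debug-logs names that are not of the _Lxx/_Exx form instead of reporting an error; for valid names the GPE number is still parsed from the two hex digits that follow the type letter (ACPI_STRTOUL(&name[2], NULL, 16) in the code above). A standalone sketch of that naming rule in plain C:

    #include <stdio.h>
    #include <stdlib.h>

    /* "_L1A" -> level-triggered GPE 0x1A, "_E02" -> edge-triggered GPE 0x02 */
    static long gpe_number_from_name(const char *name)
    {
            if (name[0] != '_' || (name[1] != 'L' && name[1] != 'E'))
                    return -1;                      /* not a GPE method name */
            return strtol(&name[2], NULL, 16);      /* the xx hex digits */
    }

    int main(void)
    {
            printf("0x%lx\n", gpe_number_from_name("_L1A"));    /* prints 0x1a */
            return 0;
    }
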
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index 0909ba69577e..6eef4efddcf6 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -49,12 +49,13 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
+/* Names for Notify() values, used for debug output */
#ifdef ACPI_DEBUG_OUTPUT
static const char *acpi_notify_value_names[] = {
"Bus Check",
"Device Check",
"Device Wake",
- "Eject request",
+ "Eject Request",
"Device Check Light",
"Frequency Mismatch",
"Bus Mode Mismatch",
@@ -124,7 +125,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
union acpi_generic_state *notify_info;
acpi_status status = AE_OK;
- ACPI_FUNCTION_NAME("ev_queue_notify_request");
+ ACPI_FUNCTION_NAME(ev_queue_notify_request);
/*
* For value 3 (Ejection Request), some device method may need to be run.
@@ -150,6 +151,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
+
/* We have the notify object, Get the right handler */
switch (node->type) {
@@ -184,14 +186,15 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
return (AE_NO_MEMORY);
}
- notify_info->common.data_type = ACPI_DESC_TYPE_STATE_NOTIFY;
+ notify_info->common.descriptor_type =
+ ACPI_DESC_TYPE_STATE_NOTIFY;
notify_info->notify.node = node;
notify_info->notify.value = (u16) notify_value;
notify_info->notify.handler_obj = handler_obj;
- status = acpi_os_queue_for_execution(OSD_PRIORITY_HIGH,
- acpi_ev_notify_dispatch,
- notify_info);
+ status =
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+ notify_info);
if (ACPI_FAILURE(status)) {
acpi_ut_delete_generic_state(notify_info);
}
@@ -240,6 +243,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
* to the device.
*/
if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
+
/* Global system notification handler */
if (acpi_gbl_system_notify.handler) {
@@ -297,6 +301,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
/* Signal threads that are waiting for the lock */
if (acpi_gbl_global_lock_thread_count) {
+
/* Send sufficient units to the semaphore */
status =
@@ -335,15 +340,16 @@ static u32 acpi_ev_global_lock_handler(void *context)
*/
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
if (acquired) {
+
/* Got the lock, now wake all threads waiting for it */
acpi_gbl_global_lock_acquired = TRUE;
/* Run the Global Lock thread which will signal all waiting threads */
- status = acpi_os_queue_for_execution(OSD_PRIORITY_HIGH,
- acpi_ev_global_lock_thread,
- context);
+ status =
+ acpi_os_execute(OSL_GLOBAL_LOCK_HANDLER,
+ acpi_ev_global_lock_thread, context);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not queue Global Lock thread"));
@@ -371,7 +377,7 @@ acpi_status acpi_ev_init_global_lock_handler(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_init_global_lock_handler");
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
acpi_gbl_global_lock_present = TRUE;
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
@@ -413,7 +419,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
acpi_status status = AE_OK;
u8 acquired = FALSE;
- ACPI_FUNCTION_TRACE("ev_acquire_global_lock");
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
#ifndef ACPI_APPLICATION
/* Make sure that we actually have a global lock */
@@ -439,6 +445,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
if (acquired) {
+
/* We got the lock */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -458,8 +465,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
* Acquire the global lock semaphore first.
* Since this wait will block, we must release the interpreter
*/
- status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
- timeout);
+ status =
+ acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
+ timeout);
return_ACPI_STATUS(status);
}
@@ -480,7 +488,7 @@ acpi_status acpi_ev_release_global_lock(void)
u8 pending = FALSE;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ev_release_global_lock");
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
if (!acpi_gbl_global_lock_thread_count) {
ACPI_WARNING((AE_INFO,
@@ -492,6 +500,7 @@ acpi_status acpi_ev_release_global_lock(void)
acpi_gbl_global_lock_thread_count--;
if (acpi_gbl_global_lock_thread_count) {
+
/* There are still some threads holding the lock, cannot release */
return_ACPI_STATUS(AE_OK);
@@ -533,7 +542,7 @@ void acpi_ev_terminate(void)
acpi_native_uint i;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_terminate");
+ ACPI_FUNCTION_TRACE(ev_terminate);
if (acpi_gbl_events_initialized) {
/*
@@ -573,7 +582,7 @@ void acpi_ev_terminate(void)
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
status = acpi_disable();
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "acpi_disable failed"));
+ ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
}
}
return_VOID;
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 6da58e776413..094a17e4c86d 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -83,7 +83,7 @@ acpi_status acpi_ev_install_region_handlers(void)
acpi_status status;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ev_install_region_handlers");
+ ACPI_FUNCTION_TRACE(ev_install_region_handlers);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -153,7 +153,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_status status;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ev_initialize_op_regions");
+ ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -164,6 +164,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
* Run the _REG methods for op_regions in each default address space
*/
for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+
/* TBD: Make sure handler is the DEFAULT handler, otherwise
* _REG will have already been run.
*/
@@ -192,12 +193,12 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
{
- struct acpi_parameter_info info;
- union acpi_operand_object *params[3];
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
union acpi_operand_object *region_obj2;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_execute_reg_method");
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@@ -208,48 +209,60 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
return_ACPI_STATUS(AE_OK);
}
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = region_obj2->extra.method_REG;
+ info->pathname = NULL;
+ info->parameters = args;
+ info->parameter_type = ACPI_PARAM_ARGS;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
/*
* The _REG method has two arguments:
*
- * Arg0, Integer: Operation region space ID
- * Same value as region_obj->Region.space_id
- * Arg1, Integer: connection status
- * 1 for connecting the handler,
- * 0 for disconnecting the handler
- * Passed as a parameter
+ * Arg0 - Integer:
+ * Operation region space ID. Same value as region_obj->Region.space_id
+ *
+ * Arg1 - Integer:
+ * Connection status: 1 for connecting the handler, 0 for disconnecting
+ * the handler (passed as a parameter)
*/
- params[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
- if (!params[0]) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[0]) {
+ status = AE_NO_MEMORY;
+ goto cleanup1;
}
- params[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
- if (!params[1]) {
+ args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[1]) {
status = AE_NO_MEMORY;
- goto cleanup;
+ goto cleanup2;
}
/* Setup the parameter objects */
- params[0]->integer.value = region_obj->region.space_id;
- params[1]->integer.value = function;
- params[2] = NULL;
-
- info.node = region_obj2->extra.method_REG;
- info.parameters = params;
- info.parameter_type = ACPI_PARAM_ARGS;
+ args[0]->integer.value = region_obj->region.space_id;
+ args[1]->integer.value = function;
+ args[2] = NULL;
/* Execute the method, no return value */
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_METHOD, info.node, NULL));
- status = acpi_ns_evaluate_by_handle(&info);
+ (ACPI_TYPE_METHOD, info->prefix_node, NULL));
- acpi_ut_remove_reference(params[1]);
+ status = acpi_ns_evaluate(info);
+ acpi_ut_remove_reference(args[1]);
- cleanup:
- acpi_ut_remove_reference(params[0]);
+ cleanup2:
+ acpi_ut_remove_reference(args[0]);
+ cleanup1:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
@@ -261,7 +274,8 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
* Function - Read or Write operation
* Address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
- * Value - Pointer to in or out value
+ * Value - Pointer to in or out value, must be
+ * full 64-bit acpi_integer
*
* RETURN: Status
*
@@ -274,7 +288,7 @@ acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 function,
acpi_physical_address address,
- u32 bit_width, void *value)
+ u32 bit_width, acpi_integer * value)
{
acpi_status status;
acpi_status status2;
@@ -284,7 +298,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
void *region_context = NULL;
- ACPI_FUNCTION_TRACE("ev_address_space_dispatch");
+ ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@@ -315,6 +329,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
*/
region_setup = handler_desc->address_space.setup;
if (!region_setup) {
+
/* No initialization routine, exit with error */
ACPI_ERROR((AE_INFO,
@@ -361,9 +376,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
if (region_obj2->extra.region_context) {
+
/* The handler for this region was already installed */
- ACPI_MEM_FREE(region_context);
+ ACPI_FREE(region_context);
} else {
/*
* Save the returned context for use in all accesses to
@@ -386,9 +402,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
- if (!
- (handler_desc->address_space.
- hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* For handlers other than the default (supplied) handlers, we must
* exit the interpreter because the handler *might* block -- we don't
@@ -409,9 +424,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
space_id)));
}
- if (!
- (handler_desc->address_space.
- hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* We just returned from a non-default handler, we must re-enter the
* interpreter
@@ -451,7 +465,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_detach_region");
+ ACPI_FUNCTION_TRACE(ev_detach_region);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@@ -463,6 +477,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
handler_obj = region_obj->region.handler;
if (!handler_obj) {
+
/* This region has no handler, all done */
return_VOID;
@@ -474,6 +489,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
last_obj_ptr = &handler_obj->address_space.region_list;
while (obj_desc) {
+
/* Is this the correct Region? */
if (obj_desc == region_obj) {
@@ -583,7 +599,7 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
u8 acpi_ns_is_locked)
{
- ACPI_FUNCTION_TRACE("ev_attach_region");
+ ACPI_FUNCTION_TRACE(ev_attach_region);
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Adding Region [%4.4s] %p to address handler %p [%s]\n",
@@ -636,7 +652,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_NAME("ev_install_handler");
+ ACPI_FUNCTION_NAME(ev_install_handler);
handler_obj = (union acpi_operand_object *)context;
@@ -666,6 +682,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, just exit */
return (AE_OK);
@@ -674,10 +691,12 @@ acpi_ev_install_handler(acpi_handle obj_handle,
/* Devices are handled different than regions */
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) {
+
/* Check if this Device already has a handler for this address space */
next_handler_obj = obj_desc->device.handler;
while (next_handler_obj) {
+
/* Found a handler, is it for the same address space? */
if (next_handler_obj->address_space.space_id ==
@@ -764,9 +783,9 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
union acpi_operand_object *handler_obj;
acpi_status status;
acpi_object_type type;
- u16 flags = 0;
+ u8 flags = 0;
- ACPI_FUNCTION_TRACE("ev_install_space_handler");
+ ACPI_FUNCTION_TRACE(ev_install_space_handler);
/*
* This registration is valid for only the types below
@@ -839,6 +858,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
/* Walk the handler list for this device */
while (handler_obj) {
+
/* Same space_id indicates a handler already installed */
if (handler_obj->address_space.space_id == space_id) {
@@ -921,7 +941,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
/* Init handler obj */
handler_obj->address_space.space_id = (u8) space_id;
- handler_obj->address_space.hflags = flags;
+ handler_obj->address_space.handler_flags = flags;
handler_obj->address_space.region_list = NULL;
handler_obj->address_space.node = node;
handler_obj->address_space.handler = handler;
@@ -979,7 +999,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_execute_reg_methods");
+ ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
/*
* Run all _REG methods for all Operation Regions for this
@@ -1001,7 +1021,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
*
* PARAMETERS: walk_namespace callback
*
- * DESCRIPTION: Run _REg method for region objects of the requested space_iD
+ * DESCRIPTION: Run _REG method for region objects of the requested space_iD
*
******************************************************************************/
@@ -1035,6 +1055,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, just exit */
return (AE_OK);
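Annotation: earlier in this file the dispatch prototype is narrowed from an opaque void *value to a pointer to a full 64-bit acpi_integer, so every region handler reads from and writes into the same fixed-width slot no matter what bit_width the access uses. A hedged sketch of a handler conforming to that contract (mock types and a toy backing store, not the real ACPICA handler signature; assumes a little-endian host for the memcpy):

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t acpi_integer_t;   /* stands in for acpi_integer */
    enum { MOCK_READ, MOCK_WRITE };

    /* Toy "register file" backing an operation region. */
    static uint8_t region_bytes[256];

    /* Transfer bit_width bits, but always through a full 64-bit slot. */
    static int mock_region_handler(int function, uint32_t offset,
                                   uint32_t bit_width, acpi_integer_t *value)
    {
        uint32_t byte_width = bit_width / 8;

        if (byte_width == 0 || byte_width > 8 ||
            offset + byte_width > sizeof(region_bytes))
            return -1;

        if (function == MOCK_READ) {
            *value = 0;                               /* clear all 64 bits first */
            memcpy(value, &region_bytes[offset], byte_width);
        } else {
            memcpy(&region_bytes[offset], value, byte_width);
        }
        return 0;
    }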
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index baed8c1a1b9f..5b3c7a85eb9a 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -71,11 +71,22 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
(union acpi_operand_object *)handle;
struct acpi_mem_space_context *local_region_context;
- ACPI_FUNCTION_TRACE("ev_system_memory_region_setup");
+ ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
if (*region_context) {
- ACPI_MEM_FREE(*region_context);
+ local_region_context =
+ (struct acpi_mem_space_context *)*region_context;
+
+ /* Delete a cached mapping if present */
+
+ if (local_region_context->mapped_length) {
+ acpi_os_unmap_memory(local_region_context->
+ mapped_logical_address,
+ local_region_context->
+ mapped_length);
+ }
+ ACPI_FREE(local_region_context);
*region_context = NULL;
}
return_ACPI_STATUS(AE_OK);
@@ -84,7 +95,7 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
/* Create a new context */
local_region_context =
- ACPI_MEM_CALLOCATE(sizeof(struct acpi_mem_space_context));
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context));
if (!(local_region_context)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -118,7 +129,7 @@ acpi_ev_io_space_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_io_space_region_setup");
+ ACPI_FUNCTION_TRACE(ev_io_space_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
*region_context = NULL;
@@ -161,7 +172,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
(union acpi_operand_object *)handle;
struct acpi_device_id object_hID;
- ACPI_FUNCTION_TRACE("ev_pci_config_region_setup");
+ ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
handler_obj = region_obj->region.handler;
if (!handler_obj) {
@@ -178,7 +189,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*region_context = NULL;
if (function == ACPI_REGION_DEACTIVATE) {
if (pci_id) {
- ACPI_MEM_FREE(pci_id);
+ ACPI_FREE(pci_id);
}
return_ACPI_STATUS(status);
}
@@ -199,6 +210,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
* handlers with that device.
*/
if (handler_obj->address_space.node == acpi_gbl_root_node) {
+
/* Start search from the parent object */
pci_root_node = parent_node;
@@ -220,6 +232,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
PCI_EXPRESS_ROOT_HID_STRING,
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))))
{
+
/* Install a handler for this PCI root bridge */
status =
@@ -235,7 +248,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
} else {
ACPI_EXCEPTION((AE_INFO,
status,
- "Could not install pci_config handler for Root Bridge %4.4s",
+ "Could not install PciConfig handler for Root Bridge %4.4s",
acpi_ut_get_node_name
(pci_root_node)));
}
@@ -262,7 +275,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Region is still not initialized. Create a new context */
- pci_id = ACPI_MEM_CALLOCATE(sizeof(struct acpi_pci_id));
+ pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id));
if (!pci_id) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -337,7 +350,7 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_pci_bar_region_setup");
+ ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup);
return_ACPI_STATUS(AE_OK);
}
@@ -364,7 +377,7 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_cmos_region_setup");
+ ACPI_FUNCTION_TRACE(ev_cmos_region_setup);
return_ACPI_STATUS(AE_OK);
}
@@ -389,7 +402,7 @@ acpi_ev_default_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_default_region_setup");
+ ACPI_FUNCTION_TRACE(ev_default_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
*region_context = NULL;
@@ -435,7 +448,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
union acpi_operand_object *region_obj2;
- ACPI_FUNCTION_TRACE_U32("ev_initialize_region", acpi_ns_locked);
+ ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
if (!region_obj) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -462,8 +475,9 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* Find any "_REG" method associated with this region definition */
- status = acpi_ns_search_node(*reg_name_ptr, node,
- ACPI_TYPE_METHOD, &method_node);
+ status =
+ acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
+ &method_node);
if (ACPI_SUCCESS(status)) {
/*
* The _REG method is optional and there can be only one per region
@@ -478,11 +492,13 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
* ie: acpi_gbl_root_node->parent_entry being set to NULL
*/
while (node) {
+
/* Check to see if a handler exists */
handler_obj = NULL;
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
+
/* Can only be a handler if the object exists */
switch (node->type) {
@@ -507,10 +523,12 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
while (handler_obj) {
+
/* Is this handler of the correct type? */
if (handler_obj->address_space.space_id ==
space_id) {
+
/* Found correct handler */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
@@ -571,7 +589,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* If we get here, there is no handler for this region */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "No handler for region_type %s(%X) (region_obj %p)\n",
+ "No handler for RegionType %s(%X) (RegionObj %p)\n",
acpi_ut_get_region_name(space_id), space_id,
region_obj));
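Annotation: the SystemMemory region setup change above makes deactivation unmap any cached mapping recorded in the per-region context (mapped_logical_address / mapped_length) before the context itself is freed, where the old code simply freed the context. A standalone sketch of that teardown ordering, with a mock unmap call standing in for acpi_os_unmap_memory:

    #include <stdlib.h>

    struct mem_ctx {                        /* stands in for acpi_mem_space_context */
        void   *mapped_logical_address;
        size_t  mapped_length;
    };

    static void mock_unmap(void *addr, size_t len) { (void)addr; (void)len; }

    /* Deactivate: drop any cached mapping first, then release the context. */
    static void region_deactivate(void **region_context)
    {
        struct mem_ctx *ctx = *region_context;

        if (!ctx)
            return;
        if (ctx->mapped_length)             /* a mapping is still cached */
            mock_unmap(ctx->mapped_logical_address, ctx->mapped_length);
        free(ctx);
        *region_context = NULL;
    }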
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 9a622169008a..8106215ad554 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -69,7 +69,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
- ACPI_FUNCTION_TRACE("ev_sci_xrupt_handler");
+ ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
@@ -108,7 +108,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
- ACPI_FUNCTION_TRACE("ev_gpe_xrupt_handler");
+ ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
@@ -140,7 +140,7 @@ u32 acpi_ev_install_sci_handler(void)
{
u32 status = AE_OK;
- ACPI_FUNCTION_TRACE("ev_install_sci_handler");
+ ACPI_FUNCTION_TRACE(ev_install_sci_handler);
status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int,
acpi_ev_sci_xrupt_handler,
@@ -171,7 +171,7 @@ acpi_status acpi_ev_remove_sci_handler(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_remove_sci_handler");
+ ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
/* Just let the OS remove the handler and disable the level */
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index b38b39dde543..76c34a66e0e0 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@@ -68,7 +66,7 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_exception_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -90,6 +88,8 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -107,14 +107,13 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
* event.
*
******************************************************************************/
-
acpi_status
acpi_install_fixed_event_handler(u32 event,
acpi_event_handler handler, void *context)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_fixed_event_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
/* Parameter validation */
@@ -161,7 +160,7 @@ acpi_install_fixed_event_handler(u32 event,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_fixed_event_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
/*******************************************************************************
*
@@ -175,13 +174,12 @@ EXPORT_SYMBOL(acpi_install_fixed_event_handler);
* DESCRIPTION: Disables the event and unregisters the event handler.
*
******************************************************************************/
-
acpi_status
acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_remove_fixed_event_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
/* Parameter validation */
@@ -216,7 +214,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_fixed_event_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
/*******************************************************************************
*
@@ -235,7 +233,6 @@ EXPORT_SYMBOL(acpi_remove_fixed_event_handler);
* DESCRIPTION: Install a handler for notifies on an ACPI device
*
******************************************************************************/
-
acpi_status
acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
@@ -246,7 +243,7 @@ acpi_install_notify_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_notify_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
/* Parameter validation */
@@ -275,6 +272,7 @@ acpi_install_notify_handler(acpi_handle device,
 * only one <external> global handler can be registered (per notify type).
*/
if (device == ACPI_ROOT_OBJECT) {
+
/* Make sure the handler is not already installed */
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
@@ -317,6 +315,7 @@ acpi_install_notify_handler(acpi_handle device,
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
+
/* Object exists - make sure there's no handler */
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
@@ -370,6 +369,7 @@ acpi_install_notify_handler(acpi_handle device,
}
if (handler_type == ACPI_ALL_NOTIFY) {
+
/* Extra ref if installed in both */
acpi_ut_add_reference(notify_obj);
@@ -381,7 +381,7 @@ acpi_install_notify_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_notify_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
/*******************************************************************************
*
@@ -399,7 +399,6 @@ EXPORT_SYMBOL(acpi_install_notify_handler);
* DESCRIPTION: Remove a handler for notifies on an ACPI device
*
******************************************************************************/
-
acpi_status
acpi_remove_notify_handler(acpi_handle device,
u32 handler_type, acpi_notify_handler handler)
@@ -409,7 +408,7 @@ acpi_remove_notify_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_remove_notify_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
/* Parameter validation */
@@ -535,7 +534,7 @@ acpi_remove_notify_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_notify_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
/*******************************************************************************
*
@@ -554,7 +553,6 @@ EXPORT_SYMBOL(acpi_remove_notify_handler);
* DESCRIPTION: Install a handler for a General Purpose Event.
*
******************************************************************************/
-
acpi_status
acpi_install_gpe_handler(acpi_handle gpe_device,
u32 gpe_number,
@@ -565,7 +563,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("acpi_install_gpe_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
/* Parameter validation */
@@ -596,7 +594,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Allocate and init handler object */
- handler = ACPI_MEM_CALLOCATE(sizeof(struct acpi_handler_info));
+ handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
if (!handler) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
@@ -630,7 +628,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_gpe_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
/*******************************************************************************
*
@@ -646,7 +644,6 @@ EXPORT_SYMBOL(acpi_install_gpe_handler);
* DESCRIPTION: Remove a handler for a General Purpose acpi_event.
*
******************************************************************************/
-
acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, acpi_event_handler address)
@@ -656,7 +653,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);
/* Parameter validation */
@@ -724,14 +721,14 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
/* Now we can free the handler object */
- ACPI_MEM_FREE(handler);
+ ACPI_FREE(handler);
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_gpe_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
/*******************************************************************************
*
@@ -746,7 +743,6 @@ EXPORT_SYMBOL(acpi_remove_gpe_handler);
* DESCRIPTION: Acquire the ACPI Global Lock
*
******************************************************************************/
-
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
{
acpi_status status;
@@ -771,7 +767,7 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
return (status);
}
-EXPORT_SYMBOL(acpi_acquire_global_lock);
+ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
/*******************************************************************************
*
@@ -784,7 +780,6 @@ EXPORT_SYMBOL(acpi_acquire_global_lock);
* DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
*
******************************************************************************/
-
acpi_status acpi_release_global_lock(u32 handle)
{
acpi_status status;
@@ -797,4 +792,4 @@ acpi_status acpi_release_global_lock(u32 handle)
return (status);
}
-EXPORT_SYMBOL(acpi_release_global_lock);
+ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
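Annotation: throughout these interface files the Linux-specific EXPORT_SYMBOL() calls (and the <linux/module.h> include) are replaced by ACPI_EXPORT_SYMBOL(), which a per-OS platform header can map onto whatever export mechanism the host provides. One plausible shape for that indirection, shown purely as an illustration and not as the literal ACPICA header text (the guard name below is hypothetical):

    /* Illustrative only -- not the literal ACPICA platform header. */
    #ifdef ACPI_BUILDING_FOR_LINUX                    /* hypothetical guard */
    #include <linux/module.h>
    #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
    #else
    /* Hosts without a module export mechanism make it a no-op. */
    #define ACPI_EXPORT_SYMBOL(symbol)
    #endif

With the macro supplied by the platform layer, the core files stay OS-neutral and the exports follow each function definition directly.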
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index ec9ce8429f15..7ebc2efac936 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>
@@ -65,7 +63,7 @@ acpi_status acpi_enable(void)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_enable");
+ ACPI_FUNCTION_TRACE(acpi_enable);
/* Make sure we have the FADT */
@@ -94,6 +92,8 @@ acpi_status acpi_enable(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_enable)
+
/*******************************************************************************
*
* FUNCTION: acpi_disable
@@ -105,12 +105,11 @@ acpi_status acpi_enable(void)
* DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode.
*
******************************************************************************/
-
acpi_status acpi_disable(void)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_disable");
+ ACPI_FUNCTION_TRACE(acpi_disable);
if (!acpi_gbl_FADT) {
ACPI_WARNING((AE_INFO, "No FADT information present!"));
@@ -137,6 +136,8 @@ acpi_status acpi_disable(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_disable)
+
/*******************************************************************************
*
* FUNCTION: acpi_enable_event
@@ -149,13 +150,12 @@ acpi_status acpi_disable(void)
* DESCRIPTION: Enable an ACPI event (fixed)
*
******************************************************************************/
-
acpi_status acpi_enable_event(u32 event, u32 flags)
{
acpi_status status = AE_OK;
u32 value;
- ACPI_FUNCTION_TRACE("acpi_enable_event");
+ ACPI_FUNCTION_TRACE(acpi_enable_event);
/* Decode the Fixed Event */
@@ -193,7 +193,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_enable_event);
+ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
@@ -208,13 +208,12 @@ EXPORT_SYMBOL(acpi_enable_event);
* DESCRIPTION: Set the type of an individual GPE
*
******************************************************************************/
-
acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_set_gpe_type");
+ ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
/* Ensure that we have a valid GPE number */
@@ -236,7 +235,7 @@ acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_set_gpe_type);
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
/*******************************************************************************
*
@@ -252,13 +251,12 @@ EXPORT_SYMBOL(acpi_set_gpe_type);
* DESCRIPTION: Enable an ACPI event (general purpose)
*
******************************************************************************/
-
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_enable_gpe");
+ ACPI_FUNCTION_TRACE(acpi_enable_gpe);
/* Use semaphore lock if not executing at interrupt level */
@@ -288,7 +286,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_enable_gpe);
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
/*******************************************************************************
*
@@ -304,13 +302,12 @@ EXPORT_SYMBOL(acpi_enable_gpe);
* DESCRIPTION: Disable an ACPI event (general purpose)
*
******************************************************************************/
-
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_disable_gpe");
+ ACPI_FUNCTION_TRACE(acpi_disable_gpe);
/* Use semaphore lock if not executing at interrupt level */
@@ -338,6 +335,8 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
/*******************************************************************************
*
* FUNCTION: acpi_disable_event
@@ -350,13 +349,12 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
* DESCRIPTION: Disable an ACPI event (fixed)
*
******************************************************************************/
-
acpi_status acpi_disable_event(u32 event, u32 flags)
{
acpi_status status = AE_OK;
u32 value;
- ACPI_FUNCTION_TRACE("acpi_disable_event");
+ ACPI_FUNCTION_TRACE(acpi_disable_event);
/* Decode the Fixed Event */
@@ -392,7 +390,7 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_disable_event);
+ACPI_EXPORT_SYMBOL(acpi_disable_event)
/*******************************************************************************
*
@@ -405,12 +403,11 @@ EXPORT_SYMBOL(acpi_disable_event);
* DESCRIPTION: Clear an ACPI event (fixed)
*
******************************************************************************/
-
acpi_status acpi_clear_event(u32 event)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_clear_event");
+ ACPI_FUNCTION_TRACE(acpi_clear_event);
/* Decode the Fixed Event */
@@ -429,7 +426,7 @@ acpi_status acpi_clear_event(u32 event)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_clear_event);
+ACPI_EXPORT_SYMBOL(acpi_clear_event)
/*******************************************************************************
*
@@ -444,13 +441,12 @@ EXPORT_SYMBOL(acpi_clear_event);
* DESCRIPTION: Clear an ACPI event (general purpose)
*
******************************************************************************/
-
acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_clear_gpe");
+ ACPI_FUNCTION_TRACE(acpi_clear_gpe);
/* Use semaphore lock if not executing at interrupt level */
@@ -478,6 +474,8 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -492,12 +490,11 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
* DESCRIPTION: Obtains and returns the current status of the event
*
******************************************************************************/
-
acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_get_event_status");
+ ACPI_FUNCTION_TRACE(acpi_get_event_status);
if (!event_status) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -518,6 +515,8 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_event_status)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_gpe_status
@@ -533,7 +532,6 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
* DESCRIPTION: Get status of an event (general purpose)
*
******************************************************************************/
-
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
u32 gpe_number, u32 flags, acpi_event_status * event_status)
@@ -541,7 +539,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_get_gpe_status");
+ ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
/* Use semaphore lock if not executing at interrupt level */
@@ -570,6 +568,8 @@ acpi_get_gpe_status(acpi_handle gpe_device,
}
return_ACPI_STATUS(status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -586,7 +586,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
* DESCRIPTION: Create and Install a block of GPE registers
*
******************************************************************************/
-
acpi_status
acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_generic_address *gpe_block_address,
@@ -597,7 +596,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_namespace_node *node;
struct acpi_gpe_block_info *gpe_block;
- ACPI_FUNCTION_TRACE("acpi_install_gpe_block");
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -636,6 +635,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, create a new one */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
@@ -665,7 +665,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_gpe_block);
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
/*******************************************************************************
*
@@ -678,14 +678,13 @@ EXPORT_SYMBOL(acpi_install_gpe_block);
* DESCRIPTION: Remove a previously installed block of GPE registers
*
******************************************************************************/
-
acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
{
union acpi_operand_object *obj_desc;
acpi_status status;
struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_remove_gpe_block");
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
if (!gpe_device) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -721,4 +720,4 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_gpe_block);
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index abf5caca9ae5..e8b86a0baad0 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@@ -75,7 +73,7 @@ acpi_install_address_space_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_address_space_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);
/* Parameter validation */
@@ -114,7 +112,7 @@ acpi_install_address_space_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_address_space_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
/*******************************************************************************
*
@@ -129,7 +127,6 @@ EXPORT_SYMBOL(acpi_install_address_space_handler);
* DESCRIPTION: Remove a previously installed handler.
*
******************************************************************************/
-
acpi_status
acpi_remove_address_space_handler(acpi_handle device,
acpi_adr_space_type space_id,
@@ -142,7 +139,7 @@ acpi_remove_address_space_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_remove_address_space_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);
/* Parameter validation */
@@ -176,9 +173,11 @@ acpi_remove_address_space_handler(acpi_handle device,
handler_obj = obj_desc->device.handler;
last_obj_ptr = &obj_desc->device.handler;
while (handler_obj) {
+
/* We have a handler, see if user requested this one */
if (handler_obj->address_space.space_id == space_id) {
+
/* Matched space_id, first dereference this in the Regions */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
@@ -229,7 +228,7 @@ acpi_remove_address_space_handler(acpi_handle device,
/* The handler does not exist */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Unable to remove address handler %p for %s(%X), dev_node %p, obj %p\n",
+ "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
handler, acpi_ut_get_region_name(space_id), space_id,
node, obj_desc));
@@ -240,4 +239,4 @@ acpi_remove_address_space_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_address_space_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
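Annotation: the removal path above walks the per-device handler list with a last_obj_ptr that always points at the link referencing the current node, so a matching handler can be unlinked without a special case for the list head. A generic standalone sketch of that pointer-to-pointer idiom (a plain C list, not the ACPICA structures):

    #include <stddef.h>

    struct handler {
        int space_id;
        struct handler *next;
    };

    /* Unlink the first handler whose space_id matches; return it or NULL. */
    static struct handler *remove_handler(struct handler **head, int space_id)
    {
        struct handler **last_ptr = head;    /* link that points at 'cur' */
        struct handler *cur = *head;

        while (cur) {
            if (cur->space_id == space_id) {
                *last_ptr = cur->next;       /* bypass the matched node */
                cur->next = NULL;
                return cur;
            }
            last_ptr = &cur->next;
            cur = cur->next;
        }
        return NULL;                         /* no handler for this space */
    }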
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index a29782fe3ecf..823352435e08 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -82,7 +82,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
struct acpi_table_desc table_info;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ex_add_table");
+ ACPI_FUNCTION_TRACE(ex_add_table);
/* Create an object to be the table handle */
@@ -100,7 +100,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
ACPI_MEMSET(&table_info, 0, sizeof(struct acpi_table_desc));
- table_info.type = ACPI_TABLE_SSDT;
+ table_info.type = ACPI_TABLE_ID_SSDT;
table_info.pointer = table;
table_info.length = (acpi_size) table->length;
table_info.allocation = ACPI_MEM_ALLOCATED;
@@ -110,6 +110,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
+
/* Table already exists, just return the handle */
return_ACPI_STATUS(AE_OK);
@@ -121,6 +122,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
status = acpi_ns_load_table(table_info.installed_desc, parent_node);
if (ACPI_FAILURE(status)) {
+
/* Uninstall table on error */
(void)acpi_tb_uninstall_table(table_info.installed_desc);
@@ -160,7 +162,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *parameter_node = NULL;
union acpi_operand_object *ddb_handle;
- ACPI_FUNCTION_TRACE("ex_load_table_op");
+ ACPI_FUNCTION_TRACE(ex_load_table_op);
#if 0
/*
@@ -169,6 +171,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
*/
status = acpi_tb_match_signature(operand[0]->string.pointer, NULL);
if (status == AE_OK) {
+
/* Signature matched -- don't allow override */
return_ACPI_STATUS(AE_ALREADY_EXISTS);
@@ -211,9 +214,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
* location within the namespace where the table will be loaded.
*/
status =
- acpi_ns_get_node_by_path(operand[3]->string.pointer,
- start_node, ACPI_NS_SEARCH_PARENT,
- &parent_node);
+ acpi_ns_get_node(start_node, operand[3]->string.pointer,
+ ACPI_NS_SEARCH_PARENT, &parent_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -234,9 +236,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* Find the node referenced by the parameter_path_string */
status =
- acpi_ns_get_node_by_path(operand[4]->string.pointer,
- start_node, ACPI_NS_SEARCH_PARENT,
- &parameter_node);
+ acpi_ns_get_node(start_node, operand[4]->string.pointer,
+ ACPI_NS_SEARCH_PARENT, &parameter_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -252,6 +253,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* Parameter Data (optional) */
if (parameter_node) {
+
/* Store the parameter data into the optional parameter object */
status = acpi_ex_store(operand[5],
@@ -294,9 +296,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
struct acpi_table_header *table_ptr = NULL;
acpi_physical_address address;
struct acpi_table_header table_header;
+ acpi_integer temp;
u32 i;
- ACPI_FUNCTION_TRACE("ex_load_op");
+ ACPI_FUNCTION_TRACE(ex_load_op);
/* Object can be either an op_region or a Field */
@@ -322,7 +325,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
address = obj_desc->region.address;
- /* Get the table length from the table header */
+ /* Get part of the table header to get the table length */
table_header.length = 0;
for (i = 0; i < 8; i++) {
@@ -330,11 +333,14 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
(acpi_physical_address)
(i + address), 8,
- ((u8 *) &
- table_header) + i);
+ &temp);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+
+ /* Get the one valid byte of the returned 64-bit value */
+
+ ACPI_CAST_PTR(u8, &table_header)[i] = (u8) temp;
}
/* Sanity check the table length */
@@ -345,7 +351,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* Allocate a buffer for the entire table */
- table_ptr = ACPI_MEM_ALLOCATE(table_header.length);
+ table_ptr = ACPI_ALLOCATE(table_header.length);
if (!table_ptr) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -357,11 +363,14 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
(acpi_physical_address)
(i + address), 8,
- ((u8 *) table_ptr +
- i));
+ &temp);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
+
+ /* Get the one valid byte of the returned 64-bit value */
+
+ ACPI_CAST_PTR(u8, table_ptr)[i] = (u8) temp;
}
break;
@@ -407,12 +416,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* The table must be either an SSDT or a PSDT */
- if ((!ACPI_STRNCMP(table_ptr->signature,
- acpi_gbl_table_data[ACPI_TABLE_PSDT].signature,
- acpi_gbl_table_data[ACPI_TABLE_PSDT].sig_length)) &&
- (!ACPI_STRNCMP(table_ptr->signature,
- acpi_gbl_table_data[ACPI_TABLE_SSDT].signature,
- acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) {
+ if ((!ACPI_COMPARE_NAME(table_ptr->signature, PSDT_SIG)) &&
+ (!ACPI_COMPARE_NAME(table_ptr->signature, SSDT_SIG))) {
ACPI_ERROR((AE_INFO,
"Table has invalid signature [%4.4s], must be SSDT or PSDT",
table_ptr->signature));
@@ -424,6 +429,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
status = acpi_ex_add_table(table_ptr, acpi_gbl_root_node, &ddb_handle);
if (ACPI_FAILURE(status)) {
+
/* On error, table_ptr was deallocated above */
return_ACPI_STATUS(status);
@@ -442,7 +448,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
cleanup:
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(table_ptr);
+ ACPI_FREE(table_ptr);
}
return_ACPI_STATUS(status);
}
@@ -465,7 +471,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
union acpi_operand_object *table_desc = ddb_handle;
struct acpi_table_desc *table_info;
- ACPI_FUNCTION_TRACE("ex_unload_table");
+ ACPI_FUNCTION_TRACE(ex_unload_table);
/*
* Validate the handle
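Annotation: with the dispatch interface now returning each 8-bit read widened to a full 64-bit value, the Load operator above reads the table header and body one byte per call into a temporary and then copies out the single valid byte (the real code uses acpi_ev_address_space_dispatch and ACPI_CAST_PTR; the reader below is a mock). A condensed sketch of that loop:

    #include <stdint.h>

    /* Pretend region read: returns 8 bits of data widened to 64 bits. */
    static int mock_region_read(uint64_t address, uint32_t bit_width,
                                uint64_t *value)
    {
        (void)bit_width;
        *value = (uint8_t)(address & 0xff);       /* dummy data */
        return 0;
    }

    /* Read 'length' bytes starting at 'address', one 8-bit access per call. */
    static int read_bytes(uint64_t address, uint8_t *buffer, uint32_t length)
    {
        uint64_t temp;
        uint32_t i;

        for (i = 0; i < length; i++) {
            if (mock_region_read(address + i, 8, &temp))
                return -1;
            /* Only the low byte of the returned 64-bit value is valid. */
            buffer[i] = (uint8_t)temp;
        }
        return 0;
    }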
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index e6d52e12d77a..b732e399b1ef 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -79,7 +79,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
u32 count;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_convert_to_integer", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_INTEGER:
@@ -199,7 +199,7 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
union acpi_operand_object *return_desc;
u8 *new_buf;
- ACPI_FUNCTION_TRACE_PTR("ex_convert_to_buffer", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_BUFFER:
@@ -319,6 +319,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
remainder = 0;
for (i = decimal_length; i > 0; i--) {
+
/* Divide by nth factor of 10 */
digit = integer;
@@ -346,6 +347,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
hex_length = (acpi_native_uint) ACPI_MUL_2(data_width);
for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
+
/* Get one hex digit, most significant digits first */
string[k] =
@@ -400,7 +402,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
u16 base = 16;
u8 separator = ',';
- ACPI_FUNCTION_TRACE_PTR("ex_convert_to_string", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_STRING:
@@ -567,7 +569,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_convert_to_target_type");
+ ACPI_FUNCTION_TRACE(ex_convert_to_target_type);
/* Default behavior */
@@ -657,7 +659,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Target type ID 0x%X aml_opcode %X dest_type %s",
+ "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 680575402835..106dc7219df7 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -69,7 +69,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
struct acpi_namespace_node *alias_node;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_create_alias");
+ ACPI_FUNCTION_TRACE(ex_create_alias);
/* Get the source/alias operands (both namespace nodes) */
@@ -164,7 +164,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
acpi_status status;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ex_create_event");
+ ACPI_FUNCTION_TRACE(ex_create_event);
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_EVENT);
if (!obj_desc) {
@@ -216,7 +216,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_create_mutex", ACPI_WALK_OPERANDS);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_mutex, ACPI_WALK_OPERANDS);
/* Create the new mutex object */
@@ -243,8 +243,9 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
obj_desc->mutex.node =
(struct acpi_namespace_node *)walk_state->operands[0];
- status = acpi_ns_attach_object(obj_desc->mutex.node,
- obj_desc, ACPI_TYPE_MUTEX);
+ status =
+ acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
+ ACPI_TYPE_MUTEX);
cleanup:
/*
@@ -280,7 +281,7 @@ acpi_ex_create_region(u8 * aml_start,
struct acpi_namespace_node *node;
union acpi_operand_object *region_obj2;
- ACPI_FUNCTION_TRACE("ex_create_region");
+ ACPI_FUNCTION_TRACE(ex_create_region);
/* Get the Namespace Node */
@@ -300,7 +301,7 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
- ACPI_ERROR((AE_INFO, "Invalid address_space type %X",
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
@@ -364,7 +365,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
struct acpi_table_header *table;
union acpi_operand_object *region_obj2;
- ACPI_FUNCTION_TRACE("ex_create_table_region");
+ ACPI_FUNCTION_TRACE(ex_create_table_region);
/* Get the Node from the object stack */
@@ -452,7 +453,7 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_create_processor", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_processor, walk_state);
/* Create the processor object */
@@ -464,9 +465,9 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
/* Initialize the processor object from the operands */
obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
+ obj_desc->processor.length = (u8) operand[3]->integer.value;
obj_desc->processor.address =
(acpi_io_address) operand[2]->integer.value;
- obj_desc->processor.length = (u8) operand[3]->integer.value;
/* Install the processor object in the parent Node */
@@ -499,7 +500,7 @@ acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state)
acpi_status status;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_create_power_resource", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_power_resource, walk_state);
/* Create the power resource object */
@@ -549,7 +550,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_status status;
u8 method_flags;
- ACPI_FUNCTION_TRACE_PTR("ex_create_method", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_method, walk_state);
/* Create a new method object */
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index a7cca8d4f855..7b9718e976bf 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -61,6 +61,10 @@ static void acpi_ex_out_pointer(char *title, void *value);
static void acpi_ex_out_address(char *title, acpi_physical_address value);
+static void
+acpi_ex_dump_object(union acpi_operand_object *obj_desc,
+ struct acpi_exdump_info *info);
+
static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc);
static void
@@ -119,7 +123,7 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
static struct acpi_exdump_info acpi_ex_dump_method[8] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "param_count"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.concurrency), "Concurrency"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.semaphore), "Semaphore"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
@@ -263,12 +267,10 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
};
-static struct acpi_exdump_info acpi_ex_dump_node[6] = {
+static struct acpi_exdump_info acpi_ex_dump_node[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
- {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(reference_count),
- "Reference Count"},
{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
};
@@ -330,7 +332,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
if (!info) {
acpi_os_printf
- ("ex_dump_object: Display not implemented for object type %s\n",
+ ("ExDumpObject: Display not implemented for object type %s\n",
acpi_ut_get_object_type_name(obj_desc));
return;
}
@@ -454,7 +456,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
- ACPI_FUNCTION_NAME("ex_dump_operand")
+ ACPI_FUNCTION_NAME(ex_dump_operand)
if (!
((ACPI_LV_EXEC & acpi_dbg_level)
@@ -463,6 +465,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
}
if (!obj_desc) {
+
/* This could be a null element of a package */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n"));
@@ -522,7 +525,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case AML_REF_OF_OP:
- acpi_os_printf("Reference: (ref_of) %p\n",
+ acpi_os_printf("Reference: (RefOf) %p\n",
obj_desc->reference.object);
break;
@@ -532,6 +535,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
obj_desc->reference.offset);
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Value is an Integer */
acpi_os_printf(" value is [%8.8X%8.8x]",
@@ -610,7 +614,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_PACKAGE:
- acpi_os_printf("Package [Len %X] element_array %p\n",
+ acpi_os_printf("Package [Len %X] ElementArray %p\n",
obj_desc->package.count,
obj_desc->package.elements);
@@ -662,13 +666,13 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_LOCAL_BANK_FIELD:
- acpi_os_printf("bank_field\n");
+ acpi_os_printf("BankField\n");
break;
case ACPI_TYPE_LOCAL_REGION_FIELD:
acpi_os_printf
- ("region_field: Bits=%X acc_width=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
+ ("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
obj_desc->field.bit_length,
obj_desc->field.access_byte_width,
obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK,
@@ -681,12 +685,12 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_LOCAL_INDEX_FIELD:
- acpi_os_printf("index_field\n");
+ acpi_os_printf("IndexField\n");
break;
case ACPI_TYPE_BUFFER_FIELD:
- acpi_os_printf("buffer_field: %X bits at byte %X bit %X of\n",
+ acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n",
obj_desc->buffer_field.bit_length,
obj_desc->buffer_field.base_byte_offset,
obj_desc->buffer_field.start_field_bit_offset);
@@ -777,7 +781,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
{
acpi_native_uint i;
- ACPI_FUNCTION_NAME("ex_dump_operands");
+ ACPI_FUNCTION_NAME(ex_dump_operands);
if (!ident) {
ident = "?";
@@ -901,7 +905,7 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
acpi_os_printf("Could not convert name to pathname\n");
} else {
acpi_os_printf("%s\n", (char *)ret_buf.pointer);
- ACPI_MEM_FREE(ret_buf.pointer);
+ ACPI_FREE(ret_buf.pointer);
}
} else if (obj_desc->reference.object) {
acpi_os_printf("\nReferenced Object: %p\n",
@@ -1017,7 +1021,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
void
acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
{
- ACPI_FUNCTION_TRACE("ex_dump_object_descriptor");
+ ACPI_FUNCTION_TRACE(ex_dump_object_descriptor);
if (!obj_desc) {
return_VOID;
@@ -1046,7 +1050,7 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
acpi_os_printf
- ("ex_dump_object_descriptor: %p is not an ACPI operand object: [%s]\n",
+ ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
obj_desc, acpi_ut_get_descriptor_name(obj_desc));
return_VOID;
}
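Annotation: exdump.c drives its object dumps from small acpi_exdump_info tables that pair a field kind with a byte offset into the object, so one generic routine can print many object layouts; the hunks above only adjust those tables and add a forward declaration. A simplified standalone version of the table-driven technique, with invented field names and offsetof in place of the ACPI_EXD_OFFSET macros:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct mock_method {                   /* example object to dump */
        uint8_t  param_count;
        uint8_t  concurrency;
        void    *semaphore;
    };

    enum exd_type { EXD_UINT8, EXD_POINTER };

    struct exdump_info {                   /* stands in for acpi_exdump_info */
        enum exd_type type;
        size_t        offset;
        const char   *name;
    };

    static const struct exdump_info method_dump[] = {
        { EXD_UINT8,   offsetof(struct mock_method, param_count), "ParamCount"  },
        { EXD_UINT8,   offsetof(struct mock_method, concurrency), "Concurrency" },
        { EXD_POINTER, offsetof(struct mock_method, semaphore),   "Semaphore"   },
    };

    /* Generic dumper: walk the table and print each field by offset. */
    static void dump_object(const void *obj, const struct exdump_info *info,
                            size_t count)
    {
        size_t i;

        for (i = 0; i < count; i++) {
            const uint8_t *field = (const uint8_t *)obj + info[i].offset;

            switch (info[i].type) {
            case EXD_UINT8:
                printf("%-12s : %02X\n", info[i].name, *field);
                break;
            case EXD_POINTER:
                printf("%-12s : %p\n", info[i].name, *(void *const *)field);
                break;
            }
        }
    }

A caller would pass the object and its table, e.g. dump_object(&m, method_dump, 3) for a struct mock_method m; dropping a row (as the node table does above) changes only the table, not the dumper.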
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
index e259201ce9a0..9ea9c3a67ca9 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/executer/exfield.c
@@ -73,7 +73,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
void *buffer;
u8 locked;
- ACPI_FUNCTION_TRACE_PTR("ex_read_data_from_field", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
/* Parameter validation */
@@ -142,6 +142,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
length =
(acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
if (length > acpi_gbl_integer_byte_width) {
+
/* Field is too large for an Integer, create a Buffer instead */
buffer_desc = acpi_ut_create_buffer_object(length);
@@ -163,11 +164,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_read [TO]: Obj %p, Type %X, Buf %p, byte_len %X\n",
+ "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
obj_desc, ACPI_GET_OBJECT_TYPE(obj_desc), buffer,
(u32) length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_read [FROM]: bit_len %X, bit_off %X, byte_off %X\n",
+ "FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc->common_field.bit_length,
obj_desc->common_field.start_field_bit_offset,
obj_desc->common_field.base_byte_offset));
@@ -219,7 +220,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
u8 locked;
union acpi_operand_object *buffer_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_write_data_to_field", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
/* Parameter validation */
@@ -329,9 +330,10 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
if (length < required_length) {
+
/* We need to create a new buffer */
- new_buffer = ACPI_MEM_CALLOCATE(required_length);
+ new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
if (!new_buffer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -347,14 +349,14 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_write [FROM]: Obj %p (%s:%X), Buf %p, byte_len %X\n",
+ "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
source_desc,
acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
(source_desc)),
ACPI_GET_OBJECT_TYPE(source_desc), buffer, length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_write [TO]: Obj %p (%s:%X), bit_len %X, bit_off %X, byte_off %X\n",
+ "FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc,
acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)),
ACPI_GET_OBJECT_TYPE(obj_desc),
@@ -375,7 +377,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
/* Free temporary buffer if we used one */
if (new_buffer) {
- ACPI_MEM_FREE(new_buffer);
+ ACPI_FREE(new_buffer);
}
return_ACPI_STATUS(status);
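Annotation: when the caller's buffer is shorter than the field, the write path above allocates a zeroed buffer of the field's full byte length, copies the supplied data into it, performs the write, and frees the temporary afterwards, so the field always receives a full-width, zero-padded datum. A standalone sketch of that pattern:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Write 'length' bytes into a field 'required_length' bytes wide,
     * zero-padding when the caller supplies fewer bytes than the field holds. */
    static int write_field(const uint8_t *data, size_t length,
                           size_t required_length)
    {
        uint8_t *new_buffer = NULL;
        const uint8_t *buffer = data;
        int status = 0;

        if (length < required_length) {
            /* Too small: use a zeroed temporary of the full field width */
            new_buffer = calloc(1, required_length);
            if (!new_buffer)
                return -1;
            memcpy(new_buffer, data, length);
            buffer = new_buffer;
            length = required_length;
        }

        /* ... perform the actual field write with 'buffer'/'length' ... */
        (void)buffer;

        free(new_buffer);                 /* free(NULL) is a no-op */
        return status;
    }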
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index bd1af35f7fcf..051053f7cccb 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -87,7 +87,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
acpi_status status = AE_OK;
union acpi_operand_object *rgn_desc;
- ACPI_FUNCTION_TRACE_U32("ex_setup_region", field_datum_byte_offset);
+ ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
rgn_desc = obj_desc->common_field.region_obj;
@@ -112,7 +112,18 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
}
+ /* Exit if Address/Length have been disallowed by the host OS */
+
+ if (rgn_desc->common.flags & AOPOBJ_INVALID) {
+ return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
+ }
+
+ /*
+ * Exit now for SMBus address space, it has a non-linear address space
+ * and the request cannot be directly validated
+ */
if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) {
+
/* SMBus has a non-linear address space */
return_ACPI_STATUS(AE_OK);
@@ -134,10 +145,10 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* length of one field datum (access width) must fit within the region.
* (Region length is specified in bytes)
*/
- if (rgn_desc->region.length < (obj_desc->common_field.base_byte_offset +
- field_datum_byte_offset +
- obj_desc->common_field.
- access_byte_width)) {
+ if (rgn_desc->region.length <
+ (obj_desc->common_field.base_byte_offset +
+ field_datum_byte_offset +
+ obj_desc->common_field.access_byte_width)) {
if (acpi_gbl_enable_interpreter_slack) {
/*
* Slack mode only: We will go ahead and allow access to this
@@ -217,7 +228,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
union acpi_operand_object *rgn_desc;
acpi_physical_address address;
- ACPI_FUNCTION_TRACE("ex_access_region");
+ ACPI_FUNCTION_TRACE(ex_access_region);
/*
* Ensure that the region operands are fully evaluated and verify
@@ -246,7 +257,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
- " Region [%s:%X], Width %X, byte_base %X, Offset %X at %8.8X%8.8X\n",
+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id,
@@ -352,7 +363,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
acpi_status status;
acpi_integer local_value;
- ACPI_FUNCTION_TRACE_U32("ex_field_datum_io", field_datum_byte_offset);
+ ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset);
if (read_write == ACPI_READ) {
if (!value) {
@@ -487,10 +498,11 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "I/O to Data Register: value_ptr %p\n",
+ "I/O to Data Register: ValuePtr %p\n",
value));
if (read_write == ACPI_READ) {
+
/* Read the datum from the data_register */
status =
@@ -559,7 +571,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
acpi_integer merged_value;
acpi_integer current_value;
- ACPI_FUNCTION_TRACE_U32("ex_write_with_update_rule", mask);
+ ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask);
/* Start with the new bits */
@@ -568,6 +580,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
/* If the mask is all ones, we don't need to worry about the update rule */
if (mask != ACPI_INTEGER_MAX) {
+
/* Decode the update rule */
switch (obj_desc->common_field.
@@ -614,7 +627,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_ERROR((AE_INFO,
- "Unknown update_rule value: %X",
+ "Unknown UpdateRule value: %X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
@@ -623,7 +636,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Mask %8.8X%8.8X, datum_offset %X, Width %X, Value %8.8X%8.8X, merged_value %8.8X%8.8X\n",
+ "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
ACPI_FORMAT_UINT64(mask),
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
@@ -666,7 +679,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
u32 field_datum_count;
u32 i;
- ACPI_FUNCTION_TRACE("ex_extract_from_field");
+ ACPI_FUNCTION_TRACE(ex_extract_from_field);
/* Validate target buffer and clear it */
@@ -704,6 +717,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
/* Read the rest of the field */
for (i = 1; i < field_datum_count; i++) {
+
/* Get next input datum from the field */
field_offset += obj_desc->common_field.access_byte_width;
@@ -771,6 +785,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
{
acpi_status status;
acpi_integer mask;
+ acpi_integer width_mask;
acpi_integer merged_datum;
acpi_integer raw_datum = 0;
u32 field_offset = 0;
@@ -780,7 +795,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
u32 field_datum_count;
u32 i;
- ACPI_FUNCTION_TRACE("ex_insert_into_field");
+ ACPI_FUNCTION_TRACE(ex_insert_into_field);
/* Validate input buffer */
@@ -795,15 +810,20 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Compute the number of datums (access width data items) */
+ width_mask =
+ ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width);
mask =
- ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
- datum_count =
- ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
- obj_desc->common_field.access_bit_width);
- field_datum_count =
- ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
- obj_desc->common_field.start_field_bit_offset,
- obj_desc->common_field.access_bit_width);
+ width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field.
+ start_field_bit_offset);
+
+ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+ obj_desc->common_field.access_bit_width);
+
+ field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
+ obj_desc->common_field.
+ start_field_bit_offset,
+ obj_desc->common_field.
+ access_bit_width);
/* Get initial Datum from the input buffer */
@@ -817,6 +837,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Write the entire field */
for (i = 1; i < field_datum_count; i++) {
+
/* Write merged datum to the target field */
merged_datum &= mask;
@@ -833,7 +854,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
merged_datum = raw_datum >>
(obj_desc->common_field.access_bit_width -
obj_desc->common_field.start_field_bit_offset);
- mask = ACPI_INTEGER_MAX;
+ mask = width_mask;
if (i == datum_count) {
break;
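The hunk above computes how many access-width datums the source data spans (datum_count) versus how many datums of the target field are touched (field_datum_count), and builds a width-limited mask for the first datum. Below is a minimal standalone sketch of that arithmetic, assuming 64-bit AML integers; the mask_bits_*/round_up_to helpers are local stand-ins for the ACPICA macros, not the driver code itself.

/* Sketch of the field-datum arithmetic from the exfldio.c hunk above. */
#include <stdint.h>
#include <stdio.h>

/* All ones in the lowest `width` bits; width >= 64 yields all ones (avoids a UB shift) */
static uint64_t mask_bits_above(unsigned width)
{
	return (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
}

/* Zeroes in the lowest `pos` bits, ones at and above `pos` */
static uint64_t mask_bits_below(unsigned pos)
{
	return ~((pos >= 64) ? ~0ULL : ((1ULL << pos) - 1));
}

/* Number of boundary-sized datums needed to hold `value` bits */
static unsigned round_up_to(unsigned value, unsigned boundary)
{
	return (value + boundary - 1) / boundary;
}

int main(void)
{
	unsigned access_bit_width = 16;	/* one field datum = 16 bits */
	unsigned start_bit_offset = 5;	/* field starts 5 bits into the first datum */
	unsigned bit_length = 35;	/* total field width in bits */

	uint64_t width_mask = mask_bits_above(access_bit_width);
	uint64_t first_mask = width_mask & mask_bits_below(start_bit_offset);

	unsigned datum_count = round_up_to(bit_length, access_bit_width);
	unsigned field_datum_count =
		round_up_to(bit_length + start_bit_offset, access_bit_width);

	printf("width_mask=%#llx first_mask=%#llx\n",
	       (unsigned long long)width_mask, (unsigned long long)first_mask);
	printf("datums in source=%u, datums touched in field=%u\n",
	       datum_count, field_datum_count);
	return 0;
}

For the example values this prints width_mask=0xffff, first_mask=0xffe0, 3 source datums and 3 field datums, which is why the later hunk switches the per-datum mask from ACPI_INTEGER_MAX to the width-limited mask.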
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index 48c18d29222a..bd98aab017cf 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -72,7 +72,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
union acpi_operand_object *reference_obj;
union acpi_operand_object *referenced_obj;
- ACPI_FUNCTION_TRACE_PTR("ex_get_object_reference", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);
*return_desc = NULL;
@@ -168,7 +168,7 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
acpi_size length1;
acpi_size new_length;
- ACPI_FUNCTION_TRACE("ex_concat_template");
+ ACPI_FUNCTION_TRACE(ex_concat_template);
/*
* Find the end_tag descriptor in each resource template.
@@ -250,7 +250,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
char *new_buf;
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_do_concatenate");
+ ACPI_FUNCTION_TRACE(ex_do_concatenate);
/*
* Convert the second operand if necessary. The first operand
@@ -445,10 +445,24 @@ acpi_ex_do_math_op(u16 opcode, acpi_integer integer0, acpi_integer integer1)
case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */
+ /*
+ * We need to check whether the shift count is greater than or equal to
+ * the integer bit width, since such a shift is undefined in the C language.
+ */
+ if (integer1 >= acpi_gbl_integer_bit_width) {
+ return (0);
+ }
return (integer0 << integer1);
case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */
+ /*
+ * We need to check whether the shift count is greater than or equal to
+ * the integer bit width, since such a shift is undefined in the C language.
+ */
+ if (integer1 >= acpi_gbl_integer_bit_width) {
+ return (0);
+ }
return (integer0 >> integer1);
case AML_SUBTRACT_OP: /* Subtract (Integer0, Integer1, Result) */
@@ -489,7 +503,7 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
acpi_status status = AE_OK;
u8 local_result = FALSE;
- ACPI_FUNCTION_TRACE("ex_do_logical_numeric_op");
+ ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);
switch (opcode) {
case AML_LAND_OP: /* LAnd (Integer0, Integer1) */
@@ -557,7 +571,7 @@ acpi_ex_do_logical_op(u16 opcode,
u8 local_result = FALSE;
int compare;
- ACPI_FUNCTION_TRACE("ex_do_logical_op");
+ ACPI_FUNCTION_TRACE(ex_do_logical_op);
/*
* Convert the second operand if necessary. The first operand
@@ -649,6 +663,7 @@ acpi_ex_do_logical_op(u16 opcode,
/* Length and all bytes must be equal */
if ((length0 == length1) && (compare == 0)) {
+
/* Length and all bytes match ==> TRUE */
local_result = TRUE;
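The ShiftLeft/ShiftRight hunks in exmisc.c above return 0 when the shift count reaches the integer bit width, because shifting a value by its full width (or more) is undefined in C. A minimal sketch of that guard, assuming 64-bit AML integers; the function names are mine, not ACPICA symbols.

/* Standalone sketch of the guarded shift from the exmisc.c hunk above. */
#include <stdint.h>
#include <stdio.h>

#define INTEGER_BIT_WIDTH 64U	/* assumes 64-bit AML integers */

static uint64_t aml_shift_left(uint64_t operand, uint64_t count)
{
	/* Shifting by >= the operand width is undefined in C; return 0 instead */
	return (count >= INTEGER_BIT_WIDTH) ? 0 : operand << count;
}

static uint64_t aml_shift_right(uint64_t operand, uint64_t count)
{
	return (count >= INTEGER_BIT_WIDTH) ? 0 : operand >> count;
}

int main(void)
{
	printf("%llx\n", (unsigned long long)aml_shift_left(0x1234, 8));	/* 123400 */
	printf("%llx\n", (unsigned long long)aml_shift_left(0x1234, 64));	/* 0, not UB */
	printf("%llx\n", (unsigned long long)aml_shift_right(0x1234, 100));	/* 0 */
	return 0;
}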
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index f843b22e20b9..93098d68cadf 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -61,7 +61,7 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* RETURN: None
*
- * DESCRIPTION: Remove a mutex from the "acquired_mutex" list
+ * DESCRIPTION: Remove a mutex from the "AcquiredMutex" list
*
******************************************************************************/
@@ -95,7 +95,7 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
*
* RETURN: None
*
- * DESCRIPTION: Add a mutex to the "acquired_mutex" list for this walk
+ * DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk
*
******************************************************************************/
@@ -144,7 +144,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_acquire_mutex", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -165,7 +165,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex [%4.4s], incorrect sync_level",
+ "Cannot acquire Mutex [%4.4s], incorrect SyncLevel",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
@@ -173,6 +173,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Support for multiple acquires by the owning thread */
if (obj_desc->mutex.owner_thread) {
+
/* Special case for Global Lock, allow all threads */
if ((obj_desc->mutex.owner_thread->thread_id ==
@@ -192,6 +193,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
if (ACPI_FAILURE(status)) {
+
/* Includes failure from a timeout on time_desc */
return_ACPI_STATUS(status);
@@ -232,7 +234,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_release_mutex");
+ ACPI_FUNCTION_TRACE(ex_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -277,7 +279,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*/
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], incorrect sync_level",
+ "Cannot release Mutex [%4.4s], incorrect SyncLevel",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
@@ -286,6 +288,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
obj_desc->mutex.acquisition_depth--;
if (obj_desc->mutex.acquisition_depth != 0) {
+
/* Just decrement the depth and return */
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index 054fe5e1a314..d3d70364626c 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -77,7 +77,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
char *name_string;
u32 size_needed;
- ACPI_FUNCTION_TRACE("ex_allocate_name_string");
+ ACPI_FUNCTION_TRACE(ex_allocate_name_string);
/*
* Allow room for all \ and ^ prefixes, all segments and a multi_name_prefix.
@@ -85,6 +85,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
* This may actually be somewhat longer than needed.
*/
if (prefix_count == ACPI_UINT32_MAX) {
+
/* Special case for root */
size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
@@ -97,7 +98,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
* Allocate a buffer for the name.
* This buffer must be deleted by the caller!
*/
- name_string = ACPI_MEM_ALLOCATE(size_needed);
+ name_string = ACPI_ALLOCATE(size_needed);
if (!name_string) {
ACPI_ERROR((AE_INFO,
"Could not allocate size %d", size_needed));
@@ -119,11 +120,13 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
/* Set up Dual or Multi prefixes if needed */
if (num_name_segs > 2) {
+
/* Set up multi prefixes */
*temp_ptr++ = AML_MULTI_NAME_PREFIX_OP;
*temp_ptr++ = (char)num_name_segs;
} else if (2 == num_name_segs) {
+
/* Set up dual prefixes */
*temp_ptr++ = AML_DUAL_NAME_PREFIX;
@@ -159,7 +162,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
u32 index;
char char_buf[5];
- ACPI_FUNCTION_TRACE("ex_name_segment");
+ ACPI_FUNCTION_TRACE(ex_name_segment);
/*
* If first character is a digit, then we know that we aren't looking at a
@@ -176,7 +179,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
for (index = 0;
(index < ACPI_NAME_SIZE)
- && (acpi_ut_valid_acpi_character(*aml_address)); index++) {
+ && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
}
@@ -184,6 +187,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
/* Valid name segment */
if (index == 4) {
+
/* Found 4 valid characters */
char_buf[4] = '\0';
@@ -249,11 +253,12 @@ acpi_ex_get_name_string(acpi_object_type data_type,
u32 prefix_count = 0;
u8 has_prefix = FALSE;
- ACPI_FUNCTION_TRACE_PTR("ex_get_name_string", aml_address);
+ ACPI_FUNCTION_TRACE_PTR(ex_get_name_string, aml_address);
if (ACPI_TYPE_LOCAL_REGION_FIELD == data_type ||
ACPI_TYPE_LOCAL_BANK_FIELD == data_type ||
ACPI_TYPE_LOCAL_INDEX_FIELD == data_type) {
+
/* Disallow prefixes for types associated with field_unit names */
name_string = acpi_ex_allocate_name_string(0, 1);
@@ -272,7 +277,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_ROOT_PREFIX:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "root_prefix(\\) at %p\n",
+ "RootPrefix(\\) at %p\n",
aml_address));
/*
@@ -290,7 +295,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
do {
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "parent_prefix (^) at %p\n",
+ "ParentPrefix (^) at %p\n",
aml_address));
aml_address++;
@@ -314,7 +319,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_DUAL_NAME_PREFIX:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "dual_name_prefix at %p\n",
+ "DualNamePrefix at %p\n",
aml_address));
aml_address++;
@@ -341,7 +346,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_MULTI_NAME_PREFIX_OP:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "multi_name_prefix at %p\n",
+ "MultiNamePrefix at %p\n",
aml_address));
/* Fetch count of segments remaining in name path */
@@ -377,7 +382,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
if (prefix_count == ACPI_UINT32_MAX) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "name_seg is \"\\\" followed by NULL\n"));
+ "NameSeg is \"\\\" followed by NULL\n"));
}
/* Consume the NULL byte */
@@ -410,6 +415,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
}
if (AE_CTRL_PENDING == status && has_prefix) {
+
/* Ran out of segments after processing a prefix */
ACPI_ERROR((AE_INFO, "Malformed Name at %p", name_string));
@@ -418,7 +424,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
if (ACPI_FAILURE(status)) {
if (name_string) {
- ACPI_MEM_FREE(name_string);
+ ACPI_FREE(name_string);
}
return_ACPI_STATUS(status);
}
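The allocation hunk above sizes the name-string buffer for the worst case before any segments are parsed. A rough sketch of that sizing, under the assumption that the non-root branch (not visible in the hunk) charges one byte per '^' prefix; the root case and the "+ 2 + 1" slack for a dual/multi-name prefix plus terminator follow the code shown.

/* Sketch of the worst-case name-string sizing from the exnames.c hunk above. */
#include <stdio.h>

#define NAME_SIZE 4	/* ACPI name segments are always 4 characters */

static unsigned name_string_size(unsigned prefix_count, unsigned num_segs,
				 int is_root)
{
	unsigned size;

	if (is_root)
		size = 1;		/* single '\' root prefix */
	else
		size = prefix_count;	/* one '^' per parent prefix (assumed) */

	size += NAME_SIZE * num_segs;	/* the segments themselves */
	size += 2;			/* room for a dual/multi-name prefix */
	size += 1;			/* NUL terminator */
	return size;
}

int main(void)
{
	printf("\\_SB.PCI0.LPCB -> %u bytes\n", name_string_size(0, 3, 1));
	printf("^^FOO -> %u bytes\n", name_string_size(2, 1, 0));
	return 0;
}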
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 23d0823bcd5e..6374d8be88e0 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -89,7 +89,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object *return_desc = NULL;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_0A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -150,7 +150,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
union acpi_operand_object **operand = &walk_state->operands[0];
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_0T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -216,7 +216,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object **operand = &walk_state->operands[0];
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_1T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -264,7 +264,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
acpi_integer power_of_ten;
acpi_integer digit;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_1T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -322,8 +322,9 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
/* Since the bit position is one-based, subtract from 33 (65) */
- return_desc->integer.value = temp32 == 0 ? 0 :
- (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
+ return_desc->integer.value =
+ temp32 ==
+ 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
break;
case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */
@@ -342,6 +343,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
for (i = 0;
(i < acpi_gbl_integer_nybble_width) && (digit > 0);
i++) {
+
/* Get the least significant 4-bit BCD digit */
temp32 = ((u32) digit) & 0xF;
@@ -487,6 +489,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_DECIMAL);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -497,6 +500,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_HEX);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -506,6 +510,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -516,6 +521,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_integer(operand[0], &return_desc,
ACPI_ANY_BASE);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -541,6 +547,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
}
if (ACPI_SUCCESS(status)) {
+
/* Store the return value computed above into the target object */
status = acpi_ex_store(return_desc, operand[1], walk_state);
@@ -548,16 +555,18 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
cleanup:
- if (!walk_state->result_obj) {
- walk_state->result_obj = return_desc;
- }
-
/* Delete return object on error */
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(return_desc);
}
+ /* Save return object on success */
+
+ else if (!walk_state->result_obj) {
+ walk_state->result_obj = return_desc;
+ }
+
return_ACPI_STATUS(status);
}
@@ -582,7 +591,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
u32 type;
acpi_integer value;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -625,6 +634,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
temp_desc = operand[0];
if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) ==
ACPI_DESC_TYPE_OPERAND) {
+
/* Internal reference object - prevent deletion */
acpi_ut_add_reference(temp_desc);
@@ -689,6 +699,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
goto cleanup;
}
+
/* Allocate a descriptor to hold the type. */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@@ -777,8 +788,25 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/* Check for a method local or argument, or standalone String */
- if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
+ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
ACPI_DESC_TYPE_NAMED) {
+ temp_desc =
+ acpi_ns_get_attached_object((struct
+ acpi_namespace_node *)
+ operand[0]);
+ if (temp_desc
+ &&
+ ((ACPI_GET_OBJECT_TYPE(temp_desc) ==
+ ACPI_TYPE_STRING)
+ || (ACPI_GET_OBJECT_TYPE(temp_desc) ==
+ ACPI_TYPE_LOCAL_REFERENCE))) {
+ operand[0] = temp_desc;
+ acpi_ut_add_reference(temp_desc);
+ } else {
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+ } else {
switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
case ACPI_TYPE_LOCAL_REFERENCE:
/*
@@ -827,26 +855,35 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
break;
case ACPI_TYPE_STRING:
+ break;
+ default:
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
+ ACPI_DESC_TYPE_NAMED) {
+ if (ACPI_GET_OBJECT_TYPE(operand[0]) ==
+ ACPI_TYPE_STRING) {
/*
* This is a deref_of (String). The string is a reference
* to a named ACPI object.
*
* 1) Find the owning Node
- * 2) Dereference the node to an actual object. Could be a
+ * 2) Dereference the node to an actual object. Could be a
* Field, so we need to resolve the node to a value.
*/
status =
- acpi_ns_get_node_by_path(operand[0]->string.
- pointer,
- walk_state->
- scope_info->scope.
- node,
- ACPI_NS_SEARCH_PARENT,
- ACPI_CAST_INDIRECT_PTR
- (struct
- acpi_namespace_node,
- &return_desc));
+ acpi_ns_get_node(walk_state->scope_info->
+ scope.node,
+ operand[0]->string.pointer,
+ ACPI_NS_SEARCH_PARENT,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &return_desc));
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -857,11 +894,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
(struct acpi_namespace_node, &return_desc),
walk_state);
goto cleanup;
-
- default:
-
- status = AE_AML_OPERAND_TYPE;
- goto cleanup;
}
}
@@ -937,13 +969,12 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_ut_add_reference
(return_desc);
}
-
break;
default:
ACPI_ERROR((AE_INFO,
- "Unknown Index target_type %X in obj %p",
+ "Unknown Index TargetType %X in obj %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@@ -957,7 +988,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
ACPI_DESC_TYPE_NAMED) {
-
return_desc =
acpi_ns_get_attached_object((struct
acpi_namespace_node
@@ -972,7 +1002,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown opcode in ref(%p) - %X",
+ "Unknown opcode in reference(%p) - %X",
operand[0],
operand[0]->reference.opcode));
@@ -998,6 +1028,11 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_ut_remove_reference(return_desc);
}
- walk_state->result_obj = return_desc;
+ /* Save return object on success */
+
+ else {
+ walk_state->result_obj = return_desc;
+ }
+
return_ACPI_STATUS(status);
}
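Several hunks in exoparg1.c above reorder the cleanup so the return object is released on failure and only stored into walk_state->result_obj on success. A small stand-in sketch of that reference-handling pattern; the struct and helper names are mine, not ACPICA types.

/* Sketch of the "delete on error, save on success" ordering from exoparg1.c above. */
#include <stdlib.h>

struct obj {
	int refcount;
};

struct walk_state {
	struct obj *result_obj;
};

static void obj_unref(struct obj *o)
{
	if (o && --o->refcount == 0)
		free(o);
}

static int finish_op(struct walk_state *ws, struct obj *return_desc, int status)
{
	if (status != 0) {
		/* Error path: drop the half-built return object */
		obj_unref(return_desc);
	} else if (!ws->result_obj) {
		/* Success: publish the object to the walk state exactly once */
		ws->result_obj = return_desc;
	}
	return status;
}

int main(void)
{
	struct walk_state ws = { 0 };
	struct obj *ret = malloc(sizeof(*ret));

	ret->refcount = 1;
	finish_op(&ws, ret, 0);		/* success: ws.result_obj now owns ret */
	obj_unref(ws.result_obj);	/* released later by the caller */
	return 0;
}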
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index e263a5ddd405..7d2cbc113160 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -92,7 +92,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
u32 value;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_0T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the opcode */
@@ -121,7 +121,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
#ifdef ACPI_GPE_NOTIFY_CHECK
/*
* GPE method wake/notify check. Here, we want to ensure that we
- * don't receive any "device_wake" Notifies from a GPE _Lxx or _Exx
+ * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
* GPE method during system runtime. If we do, the GPE is marked
* as "wake-only" and disabled.
*
@@ -138,6 +138,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
acpi_ev_check_for_wake_only_gpe(walk_state->
gpe_event_info);
if (ACPI_FAILURE(status)) {
+
/* AE_WAKE_ONLY_GPE only error, means ignore this notify */
return_ACPI_STATUS(AE_OK)
@@ -185,7 +186,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
union acpi_operand_object *return_desc2 = NULL;
acpi_status status;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_2T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_2T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
@@ -252,6 +253,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
acpi_ut_remove_reference(return_desc2);
if (ACPI_FAILURE(status)) {
+
/* Delete the return object */
acpi_ut_remove_reference(return_desc1);
@@ -281,12 +283,13 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
acpi_size length;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_1T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
if (walk_state->op_info->flags & AML_MATH) {
+
/* All simple math opcodes (add, etc.) */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@@ -383,54 +386,70 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
+ /* Initialize the Index reference object */
+
index = operand[1]->integer.value;
+ return_desc->reference.offset = (u32) index;
+ return_desc->reference.opcode = AML_INDEX_OP;
- /* At this point, the Source operand is a Package, Buffer, or String */
+ /*
+ * At this point, the Source operand is a String, Buffer, or Package.
+ * Verify that the index is within range.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+ case ACPI_TYPE_STRING:
- if (ACPI_GET_OBJECT_TYPE(operand[0]) == ACPI_TYPE_PACKAGE) {
- /* Object to be indexed is a Package */
+ if (index >= operand[0]->string.length) {
+ status = AE_AML_STRING_LIMIT;
+ }
+
+ return_desc->reference.target_type =
+ ACPI_TYPE_BUFFER_FIELD;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ if (index >= operand[0]->buffer.length) {
+ status = AE_AML_BUFFER_LIMIT;
+ }
+
+ return_desc->reference.target_type =
+ ACPI_TYPE_BUFFER_FIELD;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
if (index >= operand[0]->package.count) {
- ACPI_ERROR((AE_INFO,
- "Index value (%X%8.8X) beyond package end (%X)",
- ACPI_FORMAT_UINT64(index),
- operand[0]->package.count));
status = AE_AML_PACKAGE_LIMIT;
- goto cleanup;
}
return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
- return_desc->reference.object = operand[0];
return_desc->reference.where =
&operand[0]->package.elements[index];
- } else {
- /* Object to be indexed is a Buffer/String */
+ break;
- if (index >= operand[0]->buffer.length) {
- ACPI_ERROR((AE_INFO,
- "Index value (%X%8.8X) beyond end of buffer (%X)",
- ACPI_FORMAT_UINT64(index),
- operand[0]->buffer.length));
- status = AE_AML_BUFFER_LIMIT;
- goto cleanup;
- }
+ default:
- return_desc->reference.target_type =
- ACPI_TYPE_BUFFER_FIELD;
- return_desc->reference.object = operand[0];
+ status = AE_AML_INTERNAL;
+ goto cleanup;
+ }
+
+ /* Failure means that the Index was beyond the end of the object */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Index (%X%8.8X) is beyond end of object",
+ ACPI_FORMAT_UINT64(index)));
+ goto cleanup;
}
/*
- * Add a reference to the target package/buffer/string for the life
- * of the index.
+ * Save the target object and add a reference to it for the life
+ * of the index
*/
+ return_desc->reference.object = operand[0];
acpi_ut_add_reference(operand[0]);
- /* Complete the Index reference object */
-
- return_desc->reference.opcode = AML_INDEX_OP;
- return_desc->reference.offset = (u32) index;
-
/* Store the reference to the Target */
status = acpi_ex_store(return_desc, operand[2], walk_state);
@@ -495,7 +514,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
u8 logical_result = FALSE;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Create the internal return object */
@@ -509,6 +528,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
/* Execute the Opcode */
if (walk_state->op_info->flags & AML_LOGICAL_NUMERIC) {
+
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_numeric_op(walk_state->opcode,
@@ -518,6 +538,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
value, &logical_result);
goto store_logical_result;
} else if (walk_state->op_info->flags & AML_LOGICAL) {
+
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_op(walk_state->opcode, operand[0],
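The Index hunk in exoparg2.c above replaces the two-way Package/Buffer split with a per-type switch that range-checks the index and reports a type-specific limit error. A simplified, self-contained sketch of that validation; the enum names and error codes are stand-ins, not ACPICA definitions.

/* Sketch of the per-type Index bounds check from the exoparg2.c hunk above. */
#include <stdio.h>
#include <stdint.h>

enum obj_type { TYPE_STRING, TYPE_BUFFER, TYPE_PACKAGE };
enum aml_status { AML_OK, AML_STRING_LIMIT, AML_BUFFER_LIMIT,
		  AML_PACKAGE_LIMIT, AML_INTERNAL };

struct source {
	enum obj_type type;
	uint32_t length;	/* string/buffer length or package element count */
};

static enum aml_status check_index(const struct source *src, uint64_t index)
{
	switch (src->type) {
	case TYPE_STRING:
		return index >= src->length ? AML_STRING_LIMIT : AML_OK;
	case TYPE_BUFFER:
		return index >= src->length ? AML_BUFFER_LIMIT : AML_OK;
	case TYPE_PACKAGE:
		return index >= src->length ? AML_PACKAGE_LIMIT : AML_OK;
	default:
		return AML_INTERNAL;	/* anything else is an interpreter bug */
	}
}

int main(void)
{
	struct source buf = { TYPE_BUFFER, 16 };

	printf("Index 10 -> %d\n", check_index(&buf, 10));	/* AML_OK */
	printf("Index 16 -> %d\n", check_index(&buf, 16));	/* AML_BUFFER_LIMIT */
	return 0;
}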
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
index 6a3a883cb8a3..e2d945dfd509 100644
--- a/drivers/acpi/executer/exoparg3.c
+++ b/drivers/acpi/executer/exoparg3.c
@@ -88,20 +88,19 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
struct acpi_signal_fatal_info *fatal;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_3A_0T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
case AML_FATAL_OP: /* Fatal (fatal_type fatal_code fatal_arg) */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "fatal_op: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
+ "FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
(u32) operand[0]->integer.value,
(u32) operand[1]->integer.value,
(u32) operand[2]->integer.value));
- fatal =
- ACPI_MEM_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
+ fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
if (fatal) {
fatal->type = (u32) operand[0]->integer.value;
fatal->code = (u32) operand[1]->integer.value;
@@ -114,7 +113,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
/* Might return while OS is shutting down, just continue */
- ACPI_MEM_FREE(fatal);
+ ACPI_FREE(fatal);
break;
default:
@@ -151,7 +150,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
acpi_integer index;
acpi_size length;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_3A_1T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
@@ -196,7 +195,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* Always allocate a new buffer for the String */
- buffer = ACPI_MEM_CALLOCATE((acpi_size) length + 1);
+ buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -208,9 +207,10 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* If the requested length is zero, don't allocate a buffer */
if (length > 0) {
+
/* Allocate a new buffer for the Buffer */
- buffer = ACPI_MEM_CALLOCATE(length);
+ buffer = ACPI_ALLOCATE_ZEROED(length);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -225,6 +225,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
}
if (buffer) {
+
/* We have a buffer, copy the portion requested */
ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
index e043d924444f..f0c0ba6eb408 100644
--- a/drivers/acpi/executer/exoparg6.c
+++ b/drivers/acpi/executer/exoparg6.c
@@ -220,7 +220,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
acpi_integer index;
union acpi_operand_object *this_element;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_6A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_6A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
@@ -276,6 +276,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
* match was found.
*/
for (; index < operand[0]->package.count; index++) {
+
/* Get the current package element */
this_element = operand[0]->package.elements[index];
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index 7719ae5d4f16..44d064f427b9 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -97,7 +97,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
u32 minimum_accesses = 0xFFFFFFFF;
u32 accesses;
- ACPI_FUNCTION_TRACE("ex_generate_access");
+ ACPI_FUNCTION_TRACE(ex_generate_access);
/* Round Field start offset and length to "minimal" byte boundaries */
@@ -146,7 +146,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
accesses = field_end_offset - field_start_offset;
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "access_width %d end is within region\n",
+ "AccessWidth %d end is within region\n",
access_byte_width));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@@ -173,7 +173,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
}
} else {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "access_width %d end is NOT within region\n",
+ "AccessWidth %d end is NOT within region\n",
access_byte_width));
if (access_byte_width == 1) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@@ -228,7 +228,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
u32 byte_alignment;
u32 bit_length;
- ACPI_FUNCTION_TRACE("ex_decode_field_access");
+ ACPI_FUNCTION_TRACE(ex_decode_field_access);
access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK);
@@ -322,7 +322,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
u32 byte_alignment;
u32 nearest_byte_address;
- ACPI_FUNCTION_TRACE("ex_prep_common_field_object");
+ ACPI_FUNCTION_TRACE(ex_prep_common_field_object);
/*
* Note: the structure being initialized is the
@@ -415,13 +415,13 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
u32 type;
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_prep_field_value");
+ ACPI_FUNCTION_TRACE(ex_prep_field_value);
/* Parameter validation */
if (info->field_type != ACPI_TYPE_LOCAL_INDEX_FIELD) {
if (!info->region_node) {
- ACPI_ERROR((AE_INFO, "Null region_node"));
+ ACPI_ERROR((AE_INFO, "Null RegionNode"));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
@@ -467,7 +467,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->field.region_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "region_field: bit_off %X, Off %X, Gran %X, Region %p\n",
+ "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,
obj_desc->field.base_byte_offset,
obj_desc->field.access_byte_width,
@@ -488,7 +488,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->bank_field.bank_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Bank Field: bit_off %X, Off %X, Gran %X, Region %p, bank_reg %p\n",
+ "Bank Field: BitOff %X, Off %X, Gran %X, Region %p, BankReg %p\n",
obj_desc->bank_field.start_field_bit_offset,
obj_desc->bank_field.base_byte_offset,
obj_desc->field.access_byte_width,
@@ -519,16 +519,29 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->index_field.index_obj);
/*
+ * April 2006: Changed to match MS behavior
+ *
* The value written to the Index register is the byte offset of the
- * target field
- * Note: may change code to: ACPI_DIV_8 (Info->field_bit_position)
+ * target field in units of the granularity of the index_field
+ *
+ * Previously, the value was calculated as an index in terms of the
+ * width of the Data register, as below:
+ *
+ * obj_desc->index_field.Value = (u32)
+ * (Info->field_bit_position / ACPI_MUL_8 (
+ * obj_desc->Field.access_byte_width));
+ *
+ * February 2006: Tried value as a byte offset:
+ * obj_desc->index_field.Value = (u32)
+ * ACPI_DIV_8 (Info->field_bit_position);
*/
- obj_desc->index_field.value = (u32)
- (info->field_bit_position /
- ACPI_MUL_8(obj_desc->field.access_byte_width));
+ obj_desc->index_field.value =
+ (u32) ACPI_ROUND_DOWN(ACPI_DIV_8(info->field_bit_position),
+ obj_desc->index_field.
+ access_byte_width);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "index_field: bit_off %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
+ "IndexField: BitOff %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
obj_desc->index_field.start_field_bit_offset,
obj_desc->index_field.base_byte_offset,
obj_desc->index_field.value,
@@ -550,7 +563,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ns_get_type(info->field_node));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Set named_obj %p [%4.4s], obj_desc %p\n",
+ "Set NamedObj %p [%4.4s], ObjDesc %p\n",
info->field_node,
acpi_ut_get_node_name(info->field_node), obj_desc));
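The IndexField hunk in exprep.c above changes the value written to the Index register from an index in Data-register-width units to a byte offset rounded down to the access width. A small worked comparison of the two formulas; ROUND_DOWN and DIV_8 are local stand-ins for the ACPICA macros, and the numbers are illustrative.

/* Worked comparison of the IndexField value change from the exprep.c hunk above. */
#include <stdio.h>

#define DIV_8(x)		((x) >> 3)
#define ROUND_DOWN(v, gran)	(((v) / (gran)) * (gran))

int main(void)
{
	unsigned field_bit_position = 48;	/* example field position */
	unsigned access_byte_width = 2;		/* 16-bit Index/Data granularity */

	/* Old calculation: index in units of the Data register width */
	unsigned old_value = field_bit_position / (8 * access_byte_width);

	/* New calculation: byte offset, rounded down to the access width */
	unsigned new_value = ROUND_DOWN(DIV_8(field_bit_position),
					access_byte_width);

	printf("old Index value = %u, new Index value = %u\n",
	       old_value, new_value);	/* 3 vs 6 for this example */
	return 0;
}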
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 6a4cfdff606d..3cc97ba48b36 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -81,7 +81,7 @@ acpi_ex_system_memory_space_handler(u32 function,
u32 remainder;
#endif
- ACPI_FUNCTION_TRACE("ex_system_memory_space_handler");
+ ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);
/* Validate and translate the bit width */
@@ -103,7 +103,7 @@ acpi_ex_system_memory_space_handler(u32 function,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid system_memory width %d",
+ ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
@@ -135,6 +135,7 @@ acpi_ex_system_memory_space_handler(u32 function,
* Delete the existing mapping and create a new one.
*/
if (mem_info->mapped_length) {
+
/* Valid mapping, delete it */
acpi_os_unmap_memory(mem_info->mapped_logical_address,
@@ -181,8 +182,8 @@ acpi_ex_system_memory_space_handler(u32 function,
(acpi_integer) mem_info->mapped_physical_address);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "system_memory %d (%d width) Address=%8.8X%8.8X\n",
- function, bit_width, ACPI_FORMAT_UINT64(address)));
+ "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/*
* Perform the memory read or write
@@ -283,11 +284,11 @@ acpi_ex_system_io_space_handler(u32 function,
acpi_status status = AE_OK;
u32 value32;
- ACPI_FUNCTION_TRACE("ex_system_io_space_handler");
+ ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "system_iO %d (%d width) Address=%8.8X%8.8X\n",
- function, bit_width, ACPI_FORMAT_UINT64(address)));
+ "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/* Decode the function parameter */
@@ -342,7 +343,7 @@ acpi_ex_pci_config_space_handler(u32 function,
struct acpi_pci_id *pci_id;
u16 pci_register;
- ACPI_FUNCTION_TRACE("ex_pci_config_space_handler");
+ ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
/*
* The arguments to acpi_os(Read|Write)pci_configuration are:
@@ -360,7 +361,7 @@ acpi_ex_pci_config_space_handler(u32 function,
pci_register = (u16) (u32) address;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "pci_config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
+ "Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
function, bit_width, pci_id->segment, pci_id->bus,
pci_id->device, pci_id->function, pci_register));
@@ -414,7 +415,7 @@ acpi_ex_cmos_space_handler(u32 function,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_cmos_space_handler");
+ ACPI_FUNCTION_TRACE(ex_cmos_space_handler);
return_ACPI_STATUS(status);
}
@@ -446,7 +447,7 @@ acpi_ex_pci_bar_space_handler(u32 function,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_pci_bar_space_handler");
+ ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);
return_ACPI_STATUS(status);
}
@@ -476,23 +477,16 @@ acpi_ex_data_table_space_handler(u32 function,
acpi_integer * value,
void *handler_context, void *region_context)
{
- acpi_status status = AE_OK;
- u32 byte_width = ACPI_DIV_8(bit_width);
- u32 i;
- char *logical_addr_ptr;
-
- ACPI_FUNCTION_TRACE("ex_data_table_space_handler");
-
- logical_addr_ptr = ACPI_PHYSADDR_TO_PTR(address);
+ ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
/* Perform the memory read or write */
switch (function) {
case ACPI_READ:
- for (i = 0; i < byte_width; i++) {
- ((char *)value)[i] = logical_addr_ptr[i];
- }
+ ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
+ ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_DIV_8(bit_width));
break;
case ACPI_WRITE:
@@ -501,5 +495,5 @@ acpi_ex_data_table_space_handler(u32 function,
return_ACPI_STATUS(AE_SUPPORT);
}
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(AE_OK);
}
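The data-table handler hunk above collapses the per-byte copy loop into a single memcpy of bit_width/8 bytes and keeps writes unsupported. A simplified sketch of that read path; the handler signature and status codes are stand-ins, not the ACPICA address-space handler API.

/* Sketch of the data-table read path after the exregion.c change above. */
#include <string.h>
#include <stdint.h>
#include <stdio.h>

enum { REGION_READ, REGION_WRITE };

static int data_table_handler(int function, const void *table_addr,
			      uint32_t bit_width, uint64_t *value)
{
	switch (function) {
	case REGION_READ:
		/* One memcpy replaces the old per-byte copy loop */
		memcpy(value, table_addr, bit_width / 8);
		return 0;
	case REGION_WRITE:	/* writes to a loaded table are not supported */
	default:
		return -1;
	}
}

int main(void)
{
	uint8_t table[8] = { 0x41, 0x42, 0x43, 0x44 };	/* pretend table bytes */
	uint64_t value = 0;

	data_table_handler(REGION_READ, table, 32, &value);
	printf("read 0x%llx\n", (unsigned long long)value);	/* 0x44434241 on LE */
	return 0;
}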
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 01b26c80d22b..3089b05a1368 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -87,7 +87,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
struct acpi_namespace_node *node;
acpi_object_type entry_type;
- ACPI_FUNCTION_TRACE("ex_resolve_node_to_value");
+ ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
/*
* The stack pointer points to a struct acpi_namespace_node (Node). Get the
@@ -97,12 +97,13 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
source_desc = acpi_ns_get_attached_object(node);
entry_type = acpi_ns_get_type((acpi_handle) node);
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p source_desc=%p [%s]\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
node, source_desc,
acpi_ut_get_type_name(entry_type)));
if ((entry_type == ACPI_TYPE_LOCAL_ALIAS) ||
(entry_type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
+
/* There is always exactly one level of indirection */
node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
@@ -113,10 +114,11 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/*
* Several object types require no further processing:
- * 1) Devices rarely have an attached object, return the Node
+ * 1) Device/Thermal objects don't have a "real" subobject, return the Node
* 2) Method locals and arguments have a pseudo-Node
*/
- if (entry_type == ACPI_TYPE_DEVICE ||
+ if ((entry_type == ACPI_TYPE_DEVICE) ||
+ (entry_type == ACPI_TYPE_THERMAL) ||
(node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
return_ACPI_STATUS(AE_OK);
}
@@ -141,6 +143,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
status = acpi_ds_get_package_arguments(source_desc);
if (ACPI_SUCCESS(status)) {
+
/* Return an additional reference to the object */
obj_desc = source_desc;
@@ -158,6 +161,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
status = acpi_ds_get_buffer_arguments(source_desc);
if (ACPI_SUCCESS(status)) {
+
/* Return an additional reference to the object */
obj_desc = source_desc;
@@ -199,7 +203,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_LOCAL_INDEX_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "field_read Node=%p source_desc=%p Type=%X\n",
+ "FieldRead Node=%p SourceDesc=%p Type=%X\n",
node, source_desc, entry_type));
status =
@@ -213,7 +217,6 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
@@ -240,6 +243,8 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* This is a ddb_handle */
/* Return an additional reference to the object */
+ case AML_REF_OF_OP:
+
obj_desc = source_desc;
acpi_ut_add_reference(obj_desc);
break;
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index 1deed492fe88..6499de878017 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -78,7 +78,7 @@ acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
{
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_resolve_to_value", stack_ptr);
+ ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);
if (!stack_ptr || !*stack_ptr) {
ACPI_ERROR((AE_INFO, "Internal - null pointer"));
@@ -144,7 +144,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
union acpi_operand_object *obj_desc;
u16 opcode;
- ACPI_FUNCTION_TRACE("ex_resolve_object_to_value");
+ ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
stack_desc = *stack_ptr;
@@ -190,7 +190,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Arg/Local %X] value_obj is %p\n",
+ "[Arg/Local %X] ValueObj is %p\n",
stack_desc->reference.offset,
obj_desc));
@@ -239,7 +239,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
- "Unknown target_type %X in Index/Reference obj %p",
+ "Unknown TargetType %X in Index/Reference obj %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
@@ -257,10 +257,24 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
case AML_INT_NAMEPATH_OP: /* Reference to a named object */
- /* Get the object pointed to by the namespace node */
+ /* Dereference the name */
+
+ if ((stack_desc->reference.node->type ==
+ ACPI_TYPE_DEVICE)
+ || (stack_desc->reference.node->type ==
+ ACPI_TYPE_THERMAL)) {
+
+ /* These node types do not have 'real' subobjects */
+
+ *stack_ptr = (void *)stack_desc->reference.node;
+ } else {
+ /* Get the object pointed to by the namespace node */
+
+ *stack_ptr =
+ (stack_desc->reference.node)->object;
+ acpi_ut_add_reference(*stack_ptr);
+ }
- *stack_ptr = (stack_desc->reference.node)->object;
- acpi_ut_add_reference(*stack_ptr);
acpi_ut_remove_reference(stack_desc);
break;
@@ -293,7 +307,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
case ACPI_TYPE_LOCAL_INDEX_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "field_read source_desc=%p Type=%X\n",
+ "FieldRead SourceDesc=%p Type=%X\n",
stack_desc,
ACPI_GET_OBJECT_TYPE(stack_desc)));
@@ -337,7 +351,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
acpi_object_type type;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_ex_resolve_multiple");
+ ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);
/* Operand can be either a namespace node or an operand descriptor */
@@ -382,10 +396,16 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
switch (obj_desc->reference.opcode) {
case AML_REF_OF_OP:
+ case AML_INT_NAMEPATH_OP:
/* Dereference the reference pointer */
- node = obj_desc->reference.object;
+ if (obj_desc->reference.opcode == AML_REF_OF_OP) {
+ node = obj_desc->reference.object;
+ } else { /* AML_INT_NAMEPATH_OP */
+
+ node = obj_desc->reference.node;
+ }
/* All "References" point to a NS node */
@@ -401,6 +421,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, use the NS node type */
type = acpi_ns_get_type(node);
@@ -432,6 +453,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
*/
obj_desc = *(obj_desc->reference.where);
if (!obj_desc) {
+
/* NULL package elements are allowed */
type = 0; /* Uninitialized */
@@ -439,39 +461,6 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
}
break;
- case AML_INT_NAMEPATH_OP:
-
- /* Dereference the reference pointer */
-
- node = obj_desc->reference.node;
-
- /* All "References" point to a NS node */
-
- if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
- ACPI_DESC_TYPE_NAMED) {
- ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
- node,
- acpi_ut_get_descriptor_name(node)));
- return_ACPI_STATUS(AE_AML_INTERNAL);
- }
-
- /* Get the attached object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
- /* No object, use the NS node type */
-
- type = acpi_ns_get_type(node);
- goto exit;
- }
-
- /* Check for circular references */
-
- if (obj_desc == operand) {
- return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
- }
- break;
-
case AML_LOCAL_OP:
case AML_ARG_OP:
@@ -513,7 +502,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
case AML_DEBUG_OP:
- /* The Debug Object is of type "debug_object" */
+ /* The Debug Object is of type "DebugObject" */
type = ACPI_TYPE_DEBUG_OBJECT;
goto exit;
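The AML_INT_NAMEPATH_OP hunk in exresolv.c above special-cases Device and Thermal nodes, which carry no "real" subobject: the node itself is returned, while any other node yields its attached object plus an extra reference. A minimal stand-in sketch of that branch; the types are simplified, not ACPICA structures.

/* Sketch of the namepath dereference branch from the exresolv.c hunk above. */
#include <stddef.h>

enum node_type { TYPE_DEVICE, TYPE_THERMAL, TYPE_INTEGER, TYPE_OTHER };

struct object {
	int refcount;
};

struct ns_node {
	enum node_type type;
	struct object *object;	/* attached object, may be NULL */
};

static void *deref_namepath(struct ns_node *node)
{
	if (node->type == TYPE_DEVICE || node->type == TYPE_THERMAL) {
		/* No subobject: hand back the namespace node itself */
		return node;
	}

	/* Otherwise return the attached object and take a reference on it */
	if (node->object)
		node->object->refcount++;
	return node->object;
}

int main(void)
{
	struct object obj = { .refcount = 1 };
	struct ns_node dev = { TYPE_DEVICE, NULL };
	struct ns_node val = { TYPE_INTEGER, &obj };

	void *a = deref_namepath(&dev);	/* == &dev, no refcount change */
	void *b = deref_namepath(&val);	/* == &obj, refcount now 2 */
	return (a == &dev && b == &obj) ? 0 : 1;
}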
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index a1c000f5a415..4c93d0972333 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -77,6 +77,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
ACPI_FUNCTION_ENTRY();
if (type_needed == ACPI_TYPE_ANY) {
+
/* All types OK, so we don't perform any typechecks */
return (AE_OK);
@@ -143,7 +144,7 @@ acpi_ex_resolve_operands(u16 opcode,
acpi_object_type type_needed;
u16 target_op = 0;
- ACPI_FUNCTION_TRACE_U32("ex_resolve_operands", opcode);
+ ACPI_FUNCTION_TRACE_U32(ex_resolve_operands, opcode);
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
@@ -158,7 +159,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Opcode %X [%s] required_operand_types=%8.8X\n",
+ "Opcode %X [%s] RequiredOperandTypes=%8.8X\n",
opcode, op_info->name, arg_types));
/*
@@ -224,6 +225,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
+
/* Decode the Reference */
op_info = acpi_ps_get_opcode_info(opcode);
@@ -247,7 +249,7 @@ acpi_ex_resolve_operands(u16 opcode,
ACPI_DEBUG_ONLY_MEMBERS(ACPI_DEBUG_PRINT
((ACPI_DB_EXEC,
- "Operand is a Reference, ref_opcode [%s]\n",
+ "Operand is a Reference, RefOpcode [%s]\n",
(acpi_ps_get_opcode_info
(obj_desc->
reference.
@@ -332,6 +334,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (obj_desc->reference.opcode == AML_NAME_OP) {
+
/* Convert a named reference to the actual named object */
temp_node = obj_desc->reference.object;
@@ -623,7 +626,7 @@ acpi_ex_resolve_operands(u16 opcode,
default:
ACPI_ERROR((AE_INFO,
- "Needed [Region/region_field], found [%s] %p",
+ "Needed [Region/RegionField], found [%s] %p",
acpi_ut_get_object_type_name
(obj_desc), obj_desc));
@@ -662,6 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (target_op == AML_DEBUG_OP) {
+
/* Allow store of any object to the Debug object */
break;
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 3f020c0e2b95..0456405ba019 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -82,7 +82,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
{
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ex_do_debug_object", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
level, " "));
@@ -245,7 +245,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
acpi_status status = AE_OK;
union acpi_operand_object *ref_desc = dest_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_store", dest_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc);
/* Validate parameters */
@@ -297,7 +297,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
ACPI_DUMP_STACK_ENTRY(source_desc);
ACPI_DUMP_STACK_ENTRY(dest_desc);
- ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ex_store",
+ ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ExStore",
2,
"Target is not a Reference or Constant object");
@@ -396,7 +396,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
u8 value = 0;
u32 i;
- ACPI_FUNCTION_TRACE("ex_store_object_to_index");
+ ACPI_FUNCTION_TRACE(ex_store_object_to_index);
/*
* Destination must be a reference pointer, and
@@ -423,6 +423,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
}
if (obj_desc) {
+
/* Decrement reference count by the ref count of the parent package */
for (i = 0; i < ((union acpi_operand_object *)
@@ -502,8 +503,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
break;
default:
- ACPI_ERROR((AE_INFO,
- "Target is not a Package or buffer_field"));
+ ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField"));
status = AE_AML_OPERAND_TYPE;
break;
}
@@ -548,7 +548,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
union acpi_operand_object *new_desc;
acpi_object_type target_type;
- ACPI_FUNCTION_TRACE_PTR("ex_store_object_to_node", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);
/* Get current type of the node, and object attached to Node */
@@ -572,6 +572,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
/* If no implicit conversion, drop into the default case below */
if ((!implicit_conversion) || (walk_state->opcode == AML_COPY_OP)) {
+
/* Force execution of default (no implicit conversion) */
target_type = ACPI_TYPE_ANY;
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index 42967baf760d..591aaf0e18b3 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -72,7 +72,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
union acpi_operand_object *source_desc = *source_desc_ptr;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_resolve_object");
+ ACPI_FUNCTION_TRACE(ex_resolve_object);
/* Ensure we have a Target that can be stored to */
@@ -97,6 +97,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
*/
if (ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE) {
+
/* Resolve a reference object first */
status =
@@ -121,6 +122,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
!((ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE)
&& (source_desc->reference.opcode == AML_LOAD_OP))) {
+
/* Conversion successful but still not a valid type */
ACPI_ERROR((AE_INFO,
@@ -199,7 +201,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
union acpi_operand_object *actual_src_desc;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ex_store_object_to_object", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);
actual_src_desc = source_desc;
if (!dest_desc) {
@@ -289,6 +291,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
}
if (actual_src_desc != source_desc) {
+
/* Delete the intermediate (temporary) source object */
acpi_ut_remove_reference(actual_src_desc);
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
index 6ab707087750..99ebe5adfcda 100644
--- a/drivers/acpi/executer/exstorob.c
+++ b/drivers/acpi/executer/exstorob.c
@@ -67,7 +67,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
u32 length;
u8 *buffer;
- ACPI_FUNCTION_TRACE_PTR("ex_store_buffer_to_buffer", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
/* We know that source_desc is a buffer by now */
@@ -80,7 +80,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
*/
if ((target_desc->buffer.length == 0) ||
(target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
- target_desc->buffer.pointer = ACPI_MEM_ALLOCATE(length);
+ target_desc->buffer.pointer = ACPI_ALLOCATE(length);
if (!target_desc->buffer.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -91,6 +91,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
/* Copy source buffer to target buffer */
if (length <= target_desc->buffer.length) {
+
/* Clear existing buffer and copy in the new one */
ACPI_MEMSET(target_desc->buffer.pointer, 0,
@@ -102,7 +103,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
* NOTE: ACPI versions up to 3.0 specified that the buffer must be
* truncated if the string is smaller than the buffer. However, "other"
* implementations of ACPI never did this and thus became the defacto
- * standard. ACPi 3.0_a changes this behavior such that the buffer
+ * standard. ACPI 3.0_a changes this behavior such that the buffer
* is no longer truncated.
*/
@@ -113,6 +114,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
* copy must not truncate the original buffer.
*/
if (original_src_type == ACPI_TYPE_STRING) {
+
/* Set the new length of the target */
target_desc->buffer.length = length;
@@ -156,7 +158,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
u32 length;
u8 *buffer;
- ACPI_FUNCTION_TRACE_PTR("ex_store_string_to_string", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
/* We know that source_desc is a string by now */
@@ -183,13 +185,14 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
*/
if (target_desc->string.pointer &&
(!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
+
/* Only free if not a pointer into the DSDT */
- ACPI_MEM_FREE(target_desc->string.pointer);
+ ACPI_FREE(target_desc->string.pointer);
}
- target_desc->string.pointer = ACPI_MEM_CALLOCATE((acpi_size)
- length + 1);
+ target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size)
+ length + 1);
if (!target_desc->string.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index ea9144f42e1f..52beee3674a0 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -68,7 +68,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
acpi_status status;
acpi_status status2;
- ACPI_FUNCTION_TRACE("ex_system_wait_semaphore");
+ ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
status = acpi_os_wait_semaphore(semaphore, 1, 0);
if (ACPI_SUCCESS(status)) {
@@ -76,6 +76,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
}
if (status == AE_TIME) {
+
/* We must wait, so unlock the interpreter */
acpi_ex_exit_interpreter();
@@ -90,6 +91,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
+
/* Report fatal error, could not acquire interpreter */
return_ACPI_STATUS(status2);
@@ -191,7 +193,7 @@ acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ex_system_acquire_mutex", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -229,7 +231,7 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_system_release_mutex");
+ ACPI_FUNCTION_TRACE(ex_system_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -263,7 +265,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_system_signal_event");
+ ACPI_FUNCTION_TRACE(ex_system_signal_event);
if (obj_desc) {
status = acpi_os_signal_semaphore(obj_desc->event.semaphore, 1);
@@ -293,7 +295,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_system_wait_event");
+ ACPI_FUNCTION_TRACE(ex_system_wait_event);
if (obj_desc) {
status =
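acpi_ex_system_wait_semaphore above probes with a zero timeout first and, only if that times out, drops the interpreter lock for the blocking wait and re-acquires it afterwards. A stubbed sketch of that pattern; the lock and semaphore calls below are placeholders, not the ACPICA or OSL functions.

/* Sketch of the "release lock while blocking" pattern from exsystem.c above. */
#include <stdio.h>

enum status { OK, TIME, FAILED };

static enum status os_wait_semaphore(int timeout_ms)
{
	/* Stub: pretend the zero-timeout probe times out and the real wait succeeds */
	return timeout_ms == 0 ? TIME : OK;
}

static void exit_interpreter(void)
{
	printf("interpreter lock released\n");
}

static enum status enter_interpreter(void)
{
	printf("interpreter lock re-acquired\n");
	return OK;
}

static enum status wait_semaphore(int timeout_ms)
{
	enum status status = os_wait_semaphore(0);	/* non-blocking probe */

	if (status != TIME)
		return status;

	/* Must block: do not hold the interpreter lock across the wait */
	exit_interpreter();
	status = os_wait_semaphore(timeout_ms);

	if (enter_interpreter() != OK)
		return FAILED;	/* fatal: could not get the interpreter back */

	return status;
}

int main(void)
{
	return wait_semaphore(100) == OK ? 0 : 1;
}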
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index f73a61aeb7ec..982c8b65876f 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -87,9 +87,9 @@ acpi_status acpi_ex_enter_interpreter(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_enter_interpreter");
+ ACPI_FUNCTION_TRACE(ex_enter_interpreter);
- status = acpi_ut_acquire_mutex(ACPI_MTX_EXECUTE);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
}
@@ -123,9 +123,9 @@ void acpi_ex_exit_interpreter(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_exit_interpreter");
+ ACPI_FUNCTION_TRACE(ex_exit_interpreter);
- status = acpi_ut_release_mutex(ACPI_MTX_EXECUTE);
+ status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
}
@@ -189,11 +189,12 @@ u8 acpi_ex_acquire_global_lock(u32 field_flags)
u8 locked = FALSE;
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_acquire_global_lock");
+ ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
/* Only attempt lock if the always_lock bit is set */
if (field_flags & AML_FIELD_LOCK_RULE_MASK) {
+
/* We should attempt to get the lock, wait forever */
status = acpi_ev_acquire_global_lock(ACPI_WAIT_FOREVER);
@@ -225,15 +226,17 @@ void acpi_ex_release_global_lock(u8 locked_by_me)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_release_global_lock");
+ ACPI_FUNCTION_TRACE(ex_release_global_lock);
/* Only attempt unlock if the caller locked it */
if (locked_by_me) {
+
/* OK, now release the lock */
status = acpi_ev_release_global_lock();
if (ACPI_FAILURE(status)) {
+
/* Report the error, but there isn't much else we can do */
ACPI_EXCEPTION((AE_INFO, status,
@@ -263,7 +266,7 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base)
u32 num_digits;
acpi_integer current_value;
- ACPI_FUNCTION_TRACE("ex_digits_needed");
+ ACPI_FUNCTION_TRACE(ex_digits_needed);
/* acpi_integer is unsigned, so we don't worry about a '-' prefix */
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index e8165c4f162a..1cd25784b7a4 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -48,6 +48,8 @@ MODULE_LICENSE("GPL");
static int acpi_fan_add(struct acpi_device *device);
static int acpi_fan_remove(struct acpi_device *device, int type);
+static int acpi_fan_suspend(struct acpi_device *device, int state);
+static int acpi_fan_resume(struct acpi_device *device, int state);
static struct acpi_driver acpi_fan_driver = {
.name = ACPI_FAN_DRIVER_NAME,
@@ -56,6 +58,8 @@ static struct acpi_driver acpi_fan_driver = {
.ops = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
+ .suspend = acpi_fan_suspend,
+ .resume = acpi_fan_resume,
},
};
@@ -206,6 +210,10 @@ static int acpi_fan_add(struct acpi_device *device)
goto end;
}
+ device->flags.force_power_state = 1;
+ acpi_bus_set_power(device->handle, state);
+ device->flags.force_power_state = 0;
+
result = acpi_fan_add_fs(device);
if (result)
goto end;
@@ -239,6 +247,38 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return_VALUE(0);
}
+static int acpi_fan_suspend(struct acpi_device *device, int state)
+{
+ if (!device)
+ return -EINVAL;
+
+ acpi_bus_set_power(device->handle, ACPI_STATE_D0);
+
+ return AE_OK;
+}
+
+static int acpi_fan_resume(struct acpi_device *device, int state)
+{
+ int result = 0;
+ int power_state = 0;
+
+ if (!device)
+ return -EINVAL;
+
+ result = acpi_bus_get_power(device->handle, &power_state);
+ if (result) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error reading fan power state\n"));
+ return result;
+ }
+
+ device->flags.force_power_state = 1;
+ acpi_bus_set_power(device->handle, power_state);
+ device->flags.force_power_state = 0;
+
+ return result;
+}
+
static int __init acpi_fan_init(void)
{
int result = 0;
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
index ea2f13271ff1..de50fab2a910 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -63,7 +63,7 @@ acpi_status acpi_hw_initialize(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_initialize");
+ ACPI_FUNCTION_TRACE(hw_initialize);
/* We must have the ACPI tables by the time we get here */
@@ -100,7 +100,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
acpi_status status;
u32 retry;
- ACPI_FUNCTION_TRACE("hw_set_mode");
+ ACPI_FUNCTION_TRACE(hw_set_mode);
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
@@ -198,7 +198,7 @@ u32 acpi_hw_get_mode(void)
acpi_status status;
u32 value;
- ACPI_FUNCTION_TRACE("hw_get_mode");
+ ACPI_FUNCTION_TRACE(hw_get_mode);
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index d84942d22dd5..608a3a60ee11 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -214,6 +214,7 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Disable all GPEs in this register */
status = acpi_hw_low_level_write(8, 0x00,
@@ -250,6 +251,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Clear status on all GPEs in this register */
status = acpi_hw_low_level_write(8, 0xFF,
@@ -368,7 +370,7 @@ acpi_status acpi_hw_disable_all_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_disable_all_gpes");
+ ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
@@ -391,7 +393,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_enable_all_runtime_gpes");
+ ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block);
return_ACPI_STATUS(status);
@@ -413,7 +415,7 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_enable_all_wakeup_gpes");
+ ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
index e1fe75498415..ae142de19507 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/hardware/hwregs.c
@@ -43,8 +43,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@@ -63,23 +61,21 @@ ACPI_MODULE_NAME("hwregs")
* DESCRIPTION: Clears all fixed and general purpose status bits
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
acpi_status acpi_hw_clear_acpi_status(u32 flags)
{
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
- ACPI_FUNCTION_TRACE("hw_clear_acpi_status");
+ ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
ACPI_BITMASK_ALL_FIXED_STATUS,
(u16) acpi_gbl_FADT->xpm1a_evt_blk.address));
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS,
@@ -104,9 +100,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
unlock_and_exit:
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
@@ -129,10 +123,9 @@ acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
{
acpi_status status = AE_OK;
- struct acpi_parameter_info info;
- char *sleep_state_name;
+ struct acpi_evaluate_info *info;
- ACPI_FUNCTION_TRACE("acpi_get_sleep_type_data");
+ ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
/* Validate parameters */
@@ -140,34 +133,39 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Evaluate the namespace object containing the values for this state */
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
- info.parameters = NULL;
- info.return_object = NULL;
- sleep_state_name =
+ info->pathname =
ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
- status = acpi_ns_evaluate_by_name(sleep_state_name, &info);
+ /* Evaluate the namespace object containing the values for this state */
+
+ status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "%s while evaluating sleep_state [%s]\n",
+ "%s while evaluating SleepState [%s]\n",
acpi_format_exception(status),
- sleep_state_name));
+ info->pathname));
- return_ACPI_STATUS(status);
+ goto cleanup;
}
/* Must have a return object */
- if (!info.return_object) {
+ if (!info->return_object) {
ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
- sleep_state_name));
+ info->pathname));
status = AE_NOT_EXIST;
}
/* It must be of type Package */
- else if (ACPI_GET_OBJECT_TYPE(info.return_object) != ACPI_TYPE_PACKAGE) {
+ else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
"Sleep State return object is not a Package"));
status = AE_AML_OPERAND_TYPE;
@@ -180,7 +178,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
* by BIOS vendors seems to be to have 2 or more elements, at least
* one per sleep type (A/B).
*/
- else if (info.return_object->package.count < 2) {
+ else if (info->return_object->package.count < 2) {
ACPI_ERROR((AE_INFO,
"Sleep State return package does not have at least two elements"));
status = AE_AML_NO_OPERAND;
@@ -188,39 +186,42 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
/* The first two elements must both be of type Integer */
- else if ((ACPI_GET_OBJECT_TYPE(info.return_object->package.elements[0])
+ else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0])
!= ACPI_TYPE_INTEGER) ||
- (ACPI_GET_OBJECT_TYPE(info.return_object->package.elements[1])
+ (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
!= ACPI_TYPE_INTEGER)) {
ACPI_ERROR((AE_INFO,
"Sleep State return package elements are not both Integers (%s, %s)",
- acpi_ut_get_object_type_name(info.return_object->
+ acpi_ut_get_object_type_name(info->return_object->
package.elements[0]),
- acpi_ut_get_object_type_name(info.return_object->
+ acpi_ut_get_object_type_name(info->return_object->
package.elements[1])));
status = AE_AML_OPERAND_TYPE;
} else {
/* Valid _Sx_ package size, type, and value */
*sleep_type_a = (u8)
- (info.return_object->package.elements[0])->integer.value;
+ (info->return_object->package.elements[0])->integer.value;
*sleep_type_b = (u8)
- (info.return_object->package.elements[1])->integer.value;
+ (info->return_object->package.elements[1])->integer.value;
}
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "While evaluating sleep_state [%s], bad Sleep object %p type %s",
- sleep_state_name, info.return_object,
- acpi_ut_get_object_type_name(info.
+ "While evaluating SleepState [%s], bad Sleep object %p type %s",
+ info->pathname, info->return_object,
+ acpi_ut_get_object_type_name(info->
return_object)));
}
- acpi_ut_remove_reference(info.return_object);
+ acpi_ut_remove_reference(info->return_object);
+
+ cleanup:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_sleep_type_data);
+ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
/*******************************************************************************
*
@@ -233,13 +234,12 @@ EXPORT_SYMBOL(acpi_get_sleep_type_data);
* DESCRIPTION: Map register_id into a register bitmask.
*
******************************************************************************/
-
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
{
ACPI_FUNCTION_ENTRY();
if (register_id > ACPI_BITREG_MAX) {
- ACPI_ERROR((AE_INFO, "Invalid bit_register ID: %X",
+ ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
register_id));
return (NULL);
}
@@ -260,6 +260,8 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
*
* DESCRIPTION: ACPI bit_register read function.
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
@@ -268,7 +270,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
struct acpi_bit_register_info *bit_reg_info;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_get_register");
+ ACPI_FUNCTION_TRACE(acpi_get_register);
/* Get the info structure corresponding to the requested ACPI Register */
@@ -277,24 +279,14 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
/* Read from the register */
- status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
+ status = acpi_hw_register_read(ACPI_MTX_LOCK,
bit_reg_info->parent_register,
&register_value);
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
-
if (ACPI_SUCCESS(status)) {
+
/* Normalize the value that was read */
register_value =
@@ -311,7 +303,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_register);
+ACPI_EXPORT_SYMBOL(acpi_get_register)
/*******************************************************************************
*
@@ -326,31 +318,28 @@ EXPORT_SYMBOL(acpi_get_register);
*
* DESCRIPTION: ACPI Bit Register write function.
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
-
acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
{
u32 register_value = 0;
struct acpi_bit_register_info *bit_reg_info;
acpi_status status;
+ acpi_cpu_flags lock_flags;
- ACPI_FUNCTION_TRACE_U32("acpi_set_register", register_id);
+ ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
/* Get the info structure corresponding to the requested ACPI Register */
bit_reg_info = acpi_hw_get_bit_register_info(register_id);
if (!bit_reg_info) {
- ACPI_ERROR((AE_INFO, "Bad ACPI HW register_id: %X",
+ ACPI_ERROR((AE_INFO, "Bad ACPI HW RegisterId: %X",
register_id));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* Always do a register read first so we can insert the new bits */
@@ -458,9 +447,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
unlock_and_exit:
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
/* Normalize the value that was read */
@@ -474,7 +461,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_set_register);
+ACPI_EXPORT_SYMBOL(acpi_set_register)
/******************************************************************************
*
@@ -490,21 +477,18 @@ EXPORT_SYMBOL(acpi_set_register);
* given offset.
*
******************************************************************************/
-
acpi_status
acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
{
u32 value1 = 0;
u32 value2 = 0;
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
- ACPI_FUNCTION_TRACE("hw_register_read");
+ ACPI_FUNCTION_TRACE(hw_register_read);
if (ACPI_MTX_LOCK == use_lock) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
@@ -582,7 +566,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
if (ACPI_SUCCESS(status)) {
@@ -610,14 +594,12 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
{
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
- ACPI_FUNCTION_TRACE("hw_register_write");
+ ACPI_FUNCTION_TRACE(hw_register_write);
if (ACPI_MTX_LOCK == use_lock) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
@@ -707,7 +689,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
return_ACPI_STATUS(status);
@@ -733,7 +715,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
u64 address;
acpi_status status;
- ACPI_FUNCTION_NAME("hw_low_level_read");
+ ACPI_FUNCTION_NAME(hw_low_level_read);
/*
* Must have a valid pointer to a GAS structure, and
@@ -805,7 +787,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
u64 address;
acpi_status status;
- ACPI_FUNCTION_NAME("hw_low_level_write");
+ ACPI_FUNCTION_NAME(hw_low_level_write);
/*
* Must have a valid pointer to a GAS structure, and
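
The hwregs.c hunks above replace the ACPI_MTX_HARDWARE mutex (and the ACPI_MTX_LOCK flag checks around it) with acpi_os_acquire_lock()/acpi_os_release_lock() on acpi_gbl_hardware_lock, held around each register read-modify-write. The following is only a small userspace model of that pattern, using a pthread spinlock in place of the ACPI OSL lock; fake_register and set_register_bits() are invented for illustration and are not kernel code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_spinlock_t hw_lock;      /* stands in for acpi_gbl_hardware_lock */
static uint32_t fake_register;

static void set_register_bits(uint32_t mask, uint32_t value)
{
        pthread_spin_lock(&hw_lock);            /* acpi_os_acquire_lock() */
        uint32_t v = fake_register;             /* register read */
        v = (v & ~mask) | (value & mask);       /* insert the new bits */
        fake_register = v;                      /* register write */
        pthread_spin_unlock(&hw_lock);          /* acpi_os_release_lock() */
}

int main(void)
{
        pthread_spin_init(&hw_lock, PTHREAD_PROCESS_PRIVATE);
        set_register_bits(0x0000FF00u, 0x00001200u);
        printf("register = 0x%08x\n", fake_register);
        pthread_spin_destroy(&hw_lock);
        return 0;
}
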
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 89269272fd62..8bb43cae60c2 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -42,7 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
#include <acpi/acpi.h>
#define _COMPONENT ACPI_HARDWARE
@@ -64,7 +63,7 @@ acpi_status
acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
{
- ACPI_FUNCTION_TRACE("acpi_set_firmware_waking_vector");
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
/* Set the vector */
@@ -79,6 +78,8 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_firmware_waking_vector
@@ -92,13 +93,12 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
* DESCRIPTION: Access function for the firmware_waking_vector field in FACS
*
******************************************************************************/
-
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
{
- ACPI_FUNCTION_TRACE("acpi_get_firmware_waking_vector");
+ ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
if (!physical_address) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -118,6 +118,8 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
return_ACPI_STATUS(AE_OK);
}
+
+ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
#endif
/*******************************************************************************
@@ -134,14 +136,13 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
* various OS-specific tasks between the two steps.
*
******************************************************************************/
-
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
{
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
- ACPI_FUNCTION_TRACE("acpi_enter_sleep_state_prep");
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
/*
* _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
@@ -206,6 +207,8 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
+
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state
@@ -218,7 +221,6 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
u32 PM1Acontrol;
@@ -228,7 +230,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
u32 in_value;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_enter_sleep_state");
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
@@ -378,7 +380,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_enter_sleep_state);
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
/*******************************************************************************
*
@@ -392,13 +394,12 @@ EXPORT_SYMBOL(acpi_enter_sleep_state);
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-
acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
{
u32 in_value;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_enter_sleep_state_s4bios");
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
status =
acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
@@ -443,7 +444,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios);
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
/*******************************************************************************
*
@@ -457,7 +458,6 @@ EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios);
* Called with interrupts ENABLED.
*
******************************************************************************/
-
acpi_status acpi_leave_sleep_state(u8 sleep_state)
{
struct acpi_object_list arg_list;
@@ -468,7 +468,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
u32 PM1Acontrol;
u32 PM1Bcontrol;
- ACPI_FUNCTION_TRACE("acpi_leave_sleep_state");
+ ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
/*
* Set SLP_TYPE and SLP_EN to state S0.
@@ -490,6 +490,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
ACPI_REGISTER_PM1_CONTROL,
&PM1Acontrol);
if (ACPI_SUCCESS(status)) {
+
/* Clear SLP_EN and SLP_TYP fields */
PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
@@ -583,3 +584,5 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
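
Taken together, the hwsleep.c entry points above imply the following call order for a suspend path. This is only a rough sketch: do_suspend() is an invented name, error handling, cache flushing and processor state saving are omitted, and it is not the kernel's actual suspend code.

/* Illustrative only; not a drop-in suspend implementation */
acpi_status do_suspend(u8 sleep_state)
{
        acpi_status status;

        status = acpi_enter_sleep_state_prep(sleep_state);  /* _PTS etc. */
        if (ACPI_FAILURE(status))
                return status;

        local_irq_disable();            /* state must be entered with IRQs off */
        (void)acpi_enter_sleep_state(sleep_state);

        /* Execution resumes here after wakeup */
        local_irq_enable();             /* leave is called with interrupts on */
        return acpi_leave_sleep_state(sleep_state);          /* _WAK etc. */
}
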
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
index fc10b7cb456f..c4ec47c939fd 100644
--- a/drivers/acpi/hardware/hwtimer.c
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -42,7 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
#include <acpi/acpi.h>
#define _COMPONENT ACPI_HARDWARE
@@ -61,13 +60,13 @@ ACPI_MODULE_NAME("hwtimer")
******************************************************************************/
acpi_status acpi_get_timer_resolution(u32 * resolution)
{
- ACPI_FUNCTION_TRACE("acpi_get_timer_resolution");
+ ACPI_FUNCTION_TRACE(acpi_get_timer_resolution);
if (!resolution) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (0 == acpi_gbl_FADT->tmr_val_ext) {
+ if (acpi_gbl_FADT->tmr_val_ext == 0) {
*resolution = 24;
} else {
*resolution = 32;
@@ -76,6 +75,8 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
+
/******************************************************************************
*
* FUNCTION: acpi_get_timer
@@ -87,12 +88,11 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
* DESCRIPTION: Obtains current value of ACPI PM Timer (in ticks).
*
******************************************************************************/
-
acpi_status acpi_get_timer(u32 * ticks)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_get_timer");
+ ACPI_FUNCTION_TRACE(acpi_get_timer);
if (!ticks) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -103,7 +103,7 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_timer);
+ACPI_EXPORT_SYMBOL(acpi_get_timer)
/******************************************************************************
*
@@ -133,7 +133,6 @@ EXPORT_SYMBOL(acpi_get_timer);
* 2**32 Ticks / 3,600,000 Ticks/Sec = 1193 sec or 19.88 minutes
*
******************************************************************************/
-
acpi_status
acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
{
@@ -141,7 +140,7 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
u32 delta_ticks;
acpi_integer quotient;
- ACPI_FUNCTION_TRACE("acpi_get_timer_duration");
+ ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
if (!time_elapsed) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -154,7 +153,8 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
if (start_ticks < end_ticks) {
delta_ticks = end_ticks - start_ticks;
} else if (start_ticks > end_ticks) {
- if (0 == acpi_gbl_FADT->tmr_val_ext) {
+ if (acpi_gbl_FADT->tmr_val_ext == 0) {
+
/* 24-bit Timer */
delta_ticks =
@@ -183,4 +183,4 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_timer_duration);
+ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
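
The hwtimer.c comment above notes the roughly 20-minute rollover of the 32-bit PM timer (24-bit on parts with tmr_val_ext == 0). Below is a simplified standalone model of the rollover handling acpi_get_timer_duration() has to perform; the in-kernel arithmetic is not copied verbatim, and the helper name is invented. 3.579545 MHz is the nominal ACPI PM timer rate.

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545u     /* ticks per second */

static uint32_t delta_ticks(uint32_t start, uint32_t end, int timer_is_32bit)
{
        if (end >= start)
                return end - start;

        /* The timer wrapped; its width decides how many ticks that is */
        if (timer_is_32bit)
                return (0xFFFFFFFFu - start) + end + 1;
        return (0x00FFFFFFu - start) + end + 1;         /* 24-bit timer */
}

int main(void)
{
        uint32_t d = delta_ticks(0x00FFFF00u, 0x00000100u, 0);

        printf("elapsed: %u ticks = %llu us\n", (unsigned)d,
               (unsigned long long)d * 1000000u / PM_TIMER_FREQUENCY);
        return 0;
}
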
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
index 2e2e4051dfa7..c25b2b92edcf 100644
--- a/drivers/acpi/hotkey.c
+++ b/drivers/acpi/hotkey.c
@@ -723,6 +723,8 @@ get_parms(char *config_record,
goto do_fail;
count = tmp1 - tmp;
*action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
+ if (!*action_handle)
+ goto do_fail;
strncpy(*action_handle, tmp, count);
*(*action_handle + count) = 0;
diff --git a/drivers/acpi/ibm_acpi.c b/drivers/acpi/ibm_acpi.c
index 262b1f41335a..15fc12482ba0 100644
--- a/drivers/acpi/ibm_acpi.c
+++ b/drivers/acpi/ibm_acpi.c
@@ -567,6 +567,69 @@ static int bluetooth_write(char *buf)
return 0;
}
+static int wan_supported;
+
+static int wan_init(void)
+{
+ wan_supported = hkey_handle &&
+ acpi_evalf(hkey_handle, NULL, "GWAN", "qv");
+
+ return 0;
+}
+
+static int wan_status(void)
+{
+ int status;
+
+ if (!wan_supported ||
+ !acpi_evalf(hkey_handle, &status, "GWAN", "d"))
+ status = 0;
+
+ return status;
+}
+
+static int wan_read(char *p)
+{
+ int len = 0;
+ int status = wan_status();
+
+ if (!wan_supported)
+ len += sprintf(p + len, "status:\t\tnot supported\n");
+ else if (!(status & 1))
+ len += sprintf(p + len, "status:\t\tnot installed\n");
+ else {
+ len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 1));
+ len += sprintf(p + len, "commands:\tenable, disable\n");
+ }
+
+ return len;
+}
+
+static int wan_write(char *buf)
+{
+ int status = wan_status();
+ char *cmd;
+ int do_cmd = 0;
+
+ if (!wan_supported)
+ return -ENODEV;
+
+ while ((cmd = next_cmd(&buf))) {
+ if (strlencmp(cmd, "enable") == 0) {
+ status |= 2;
+ } else if (strlencmp(cmd, "disable") == 0) {
+ status &= ~2;
+ } else
+ return -EINVAL;
+ do_cmd = 1;
+ }
+
+ if (do_cmd && !acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
+ return -EIO;
+
+ return 0;
+}
+
static int video_supported;
static int video_orig_autosw;
@@ -1563,6 +1626,13 @@ static struct ibm_struct ibms[] = {
.write = bluetooth_write,
},
{
+ .name = "wan",
+ .init = wan_init,
+ .read = wan_read,
+ .write = wan_write,
+ .experimental = 1,
+ },
+ {
.name = "video",
.init = video_init,
.read = video_read,
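
The new "wan" feature above drives the GWAN/SWAN methods through a small status word: bit 0 reports that WAN hardware is installed, bit 1 enables the radio. The sketch below models only that bit handling; the macro and helper names are invented here, and only the bit layout comes from the driver code above.

#include <stdio.h>

#define WAN_INSTALLED 0x1
#define WAN_ENABLED   0x2

static int wan_set_enabled(int status, int enable)
{
        return enable ? (status | WAN_ENABLED) : (status & ~WAN_ENABLED);
}

int main(void)
{
        int status = WAN_INSTALLED;             /* as read back via GWAN */

        status = wan_set_enabled(status, 1);    /* the "enable" command */
        printf("installed=%d enabled=%d\n",
               !!(status & WAN_INSTALLED), !!(status & WAN_ENABLED));
        return 0;
}
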
diff --git a/drivers/acpi/motherboard.c b/drivers/acpi/motherboard.c
index 468244147ec1..d51d68f5dd8d 100644
--- a/drivers/acpi/motherboard.c
+++ b/drivers/acpi/motherboard.c
@@ -37,7 +37,7 @@ ACPI_MODULE_NAME("acpi_motherboard")
#define ACPI_MB_HID2 "PNP0C02"
/**
* Doesn't care about legacy IO ports, only IO ports beyond 0x1000 are reserved
- * Doesn't care about the failure of 'request_region', since other may reserve
+ * Doesn't care about the failure of 'request_region', since others may reserve
* the io ports as well
*/
#define IS_RESERVED_ADDR(base, len) \
@@ -46,7 +46,7 @@ ACPI_MODULE_NAME("acpi_motherboard")
/*
* Clearing the flag (IORESOURCE_BUSY) allows drivers to use
* the io ports if they really know they can use it, while
- * still preventing hotplug PCI devices from using it.
+ * still preventing hotplug PCI devices from using it.
*/
static acpi_status acpi_reserve_io_ranges(struct acpi_resource *res, void *data)
{
@@ -123,49 +123,54 @@ static struct acpi_driver acpi_motherboard_driver2 = {
},
};
+static void __init acpi_request_region (struct acpi_generic_address *addr,
+ unsigned int length, char *desc)
+{
+ if (!addr->address || !length)
+ return;
+
+ if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr->address, length, desc);
+ else if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr->address, length, desc);
+}
+
static void __init acpi_reserve_resources(void)
{
- if (acpi_gbl_FADT->xpm1a_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
- request_region(acpi_gbl_FADT->xpm1a_evt_blk.address,
- acpi_gbl_FADT->pm1_evt_len, "PM1a_EVT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
+ acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
- if (acpi_gbl_FADT->xpm1b_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
- request_region(acpi_gbl_FADT->xpm1b_evt_blk.address,
- acpi_gbl_FADT->pm1_evt_len, "PM1b_EVT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1b_evt_blk,
+ acpi_gbl_FADT->pm1_evt_len, "ACPI PM1b_EVT_BLK");
- if (acpi_gbl_FADT->xpm1a_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
- request_region(acpi_gbl_FADT->xpm1a_cnt_blk.address,
- acpi_gbl_FADT->pm1_cnt_len, "PM1a_CNT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1a_cnt_blk,
+ acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1a_CNT_BLK");
- if (acpi_gbl_FADT->xpm1b_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
- request_region(acpi_gbl_FADT->xpm1b_cnt_blk.address,
- acpi_gbl_FADT->pm1_cnt_len, "PM1b_CNT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1b_cnt_blk,
+ acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1b_CNT_BLK");
- if (acpi_gbl_FADT->xpm_tmr_blk.address && acpi_gbl_FADT->pm_tm_len == 4)
- request_region(acpi_gbl_FADT->xpm_tmr_blk.address, 4, "PM_TMR");
+ if (acpi_gbl_FADT->pm_tm_len == 4)
+ acpi_request_region(&acpi_gbl_FADT->xpm_tmr_blk, 4, "ACPI PM_TMR");
- if (acpi_gbl_FADT->xpm2_cnt_blk.address && acpi_gbl_FADT->pm2_cnt_len)
- request_region(acpi_gbl_FADT->xpm2_cnt_blk.address,
- acpi_gbl_FADT->pm2_cnt_len, "PM2_CNT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm2_cnt_blk,
+ acpi_gbl_FADT->pm2_cnt_len, "ACPI PM2_CNT_BLK");
/* Length of GPE blocks must be a non-negative multiple of 2 */
- if (acpi_gbl_FADT->xgpe0_blk.address && acpi_gbl_FADT->gpe0_blk_len &&
- !(acpi_gbl_FADT->gpe0_blk_len & 0x1))
- request_region(acpi_gbl_FADT->xgpe0_blk.address,
- acpi_gbl_FADT->gpe0_blk_len, "GPE0_BLK");
+ if (!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
+ acpi_request_region(&acpi_gbl_FADT->xgpe0_blk,
+ acpi_gbl_FADT->gpe0_blk_len, "ACPI GPE0_BLK");
- if (acpi_gbl_FADT->xgpe1_blk.address && acpi_gbl_FADT->gpe1_blk_len &&
- !(acpi_gbl_FADT->gpe1_blk_len & 0x1))
- request_region(acpi_gbl_FADT->xgpe1_blk.address,
- acpi_gbl_FADT->gpe1_blk_len, "GPE1_BLK");
+ if (!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
+ acpi_request_region(&acpi_gbl_FADT->xgpe1_blk,
+ acpi_gbl_FADT->gpe1_blk_len, "ACPI GPE1_BLK");
}
static int __init acpi_motherboard_init(void)
{
acpi_bus_register_driver(&acpi_motherboard_driver1);
acpi_bus_register_driver(&acpi_motherboard_driver2);
- /*
+ /*
* Guarantee motherboard IO reservation first
* This module must run after scan.c
*/
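
acpi_request_region() above folds the repeated "if address and length, request_region()" blocks into one helper keyed on the GAS address-space ID. Below is a reduced standalone model of that shape; struct gas, the enum, and the example address are simplified stand-ins, not the real struct acpi_generic_address.

#include <stdio.h>

enum space_id { SPACE_SYSTEM_IO, SPACE_SYSTEM_MEMORY };

struct gas {
        enum space_id space_id;
        unsigned long address;
};

static void request_region_for(const struct gas *addr,
                               unsigned int length, const char *desc)
{
        if (!addr->address || !length)
                return;                 /* nothing to reserve */

        if (addr->space_id == SPACE_SYSTEM_IO)
                printf("request_region(0x%lx, %u, \"%s\")\n",
                       addr->address, length, desc);
        else
                printf("request_mem_region(0x%lx, %u, \"%s\")\n",
                       addr->address, length, desc);
}

int main(void)
{
        struct gas pm1a_evt = { SPACE_SYSTEM_IO, 0x400 };   /* example address */

        request_region_for(&pm1a_evt, 4, "ACPI PM1a_EVT_BLK");
        return 0;
}
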
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
index 1149bc18fb35..48fadade52e2 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -70,7 +70,7 @@ acpi_status acpi_ns_root_initialize(void)
union acpi_operand_object *obj_desc;
acpi_string val = NULL;
- ACPI_FUNCTION_TRACE("ns_root_initialize");
+ ACPI_FUNCTION_TRACE(ns_root_initialize);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -98,6 +98,7 @@ acpi_status acpi_ns_root_initialize(void)
"Entering predefined entries into namespace\n"));
for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {
+
/* _OSI is optional for now, will be permanent later */
if (!ACPI_STRCMP(init_val->name, "_OSI")
@@ -156,7 +157,7 @@ acpi_status acpi_ns_root_initialize(void)
#if defined (ACPI_ASL_COMPILER)
- /* save the parameter count for the i_aSL compiler */
+ /* Save the parameter count for the i_aSL compiler */
new_node->value = obj_desc->method.param_count;
#else
@@ -258,10 +259,8 @@ acpi_status acpi_ns_root_initialize(void)
/* Save a handle to "_GPE", it is always present */
if (ACPI_SUCCESS(status)) {
- status =
- acpi_ns_get_node_by_path("\\_GPE", NULL,
- ACPI_NS_NO_UPSEARCH,
- &acpi_gbl_fadt_gpe_device);
+ status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
+ &acpi_gbl_fadt_gpe_device);
}
return_ACPI_STATUS(status);
@@ -310,17 +309,17 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
acpi_object_type type_to_check_for;
acpi_object_type this_search_type;
u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
- u32 local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND |
- ACPI_NS_SEARCH_PARENT);
+ u32 local_flags;
- ACPI_FUNCTION_TRACE("ns_lookup");
+ ACPI_FUNCTION_TRACE(ns_lookup);
if (!return_node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- acpi_gbl_ns_lookup_count++;
+ local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
*return_node = ACPI_ENTRY_NOT_FOUND;
+ acpi_gbl_ns_lookup_count++;
if (!acpi_gbl_root_node) {
return_ACPI_STATUS(AE_NO_NAMESPACE);
@@ -346,14 +345,17 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
return_ACPI_STATUS(AE_AML_INTERNAL);
}
- /*
- * This node might not be a actual "scope" node (such as a
- * Device/Method, etc.) It could be a Package or other object node.
- * Backup up the tree to find the containing scope node.
- */
- while (!acpi_ns_opens_scope(prefix_node->type) &&
- prefix_node->type != ACPI_TYPE_ANY) {
- prefix_node = acpi_ns_get_parent_node(prefix_node);
+ if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
+ /*
+ * This node might not be an actual "scope" node (such as a
+ * Device/Method, etc.) It could be a Package or other object node.
+ * Back up the tree to find the containing scope node.
+ */
+ while (!acpi_ns_opens_scope(prefix_node->type) &&
+ prefix_node->type != ACPI_TYPE_ANY) {
+ prefix_node =
+ acpi_ns_get_parent_node(prefix_node);
+ }
}
}
@@ -365,6 +367,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
* Begin examination of the actual pathname
*/
if (!pathname) {
+
/* A Null name_path is allowed and refers to the root */
num_segments = 0;
@@ -389,6 +392,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
* to the current scope).
*/
if (*path == (u8) AML_ROOT_PREFIX) {
+
/* Pathname is fully qualified, start from the root */
this_node = acpi_gbl_root_node;
@@ -416,6 +420,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node = prefix_node;
num_carats = 0;
while (*path == (u8) AML_PARENT_PREFIX) {
+
/* Name is fully qualified, no search rules apply */
search_parent_flag = ACPI_NS_NO_UPSEARCH;
@@ -430,6 +435,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
num_carats++;
this_node = acpi_ns_get_parent_node(this_node);
if (!this_node) {
+
/* Current scope has no parent scope */
ACPI_ERROR((AE_INFO,
@@ -569,6 +575,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
&this_node);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
+
/* Name not found in ACPI namespace */
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
@@ -602,10 +609,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
(type_to_check_for != ACPI_TYPE_LOCAL_SCOPE) &&
(this_node->type != ACPI_TYPE_ANY) &&
(this_node->type != type_to_check_for)) {
+
/* Complain about a type mismatch */
ACPI_WARNING((AE_INFO,
- "ns_lookup: Type mismatch on %4.4s (%s), searching for (%s)",
+ "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
ACPI_CAST_PTR(char, &simple_name),
acpi_ut_get_type_name(this_node->type),
acpi_ut_get_type_name
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
index 9b871f38b61b..dc3f0739a46b 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -47,9 +47,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsalloc")
-/* Local prototypes */
-static void acpi_ns_remove_reference(struct acpi_namespace_node *node);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_create_node
@@ -61,14 +58,13 @@ static void acpi_ns_remove_reference(struct acpi_namespace_node *node);
* DESCRIPTION: Create a namespace node
*
******************************************************************************/
-
struct acpi_namespace_node *acpi_ns_create_node(u32 name)
{
struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("ns_create_node");
+ ACPI_FUNCTION_TRACE(ns_create_node);
- node = ACPI_MEM_CALLOCATE(sizeof(struct acpi_namespace_node));
+ node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
if (!node) {
return_PTR(NULL);
}
@@ -76,9 +72,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
node->name.integer = name;
- node->reference_count = 1;
ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
-
return_PTR(node);
}
@@ -100,7 +94,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
struct acpi_namespace_node *prev_node;
struct acpi_namespace_node *next_node;
- ACPI_FUNCTION_TRACE_PTR("ns_delete_node", node);
+ ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node);
parent_node = acpi_ns_get_parent_node(node);
@@ -115,6 +109,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
}
if (prev_node) {
+
/* Node is not first child, unlink it */
prev_node->peer = next_node->peer;
@@ -125,6 +120,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
/* Node is first child (has no previous peer) */
if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+
/* No peers at all */
parent_node->child = NULL;
@@ -137,10 +133,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
/*
- * Detach an object if there is one then delete the node
+ * Detach an object if there is one, then delete the node
*/
acpi_ns_detach_object(node);
- ACPI_MEM_FREE(node);
+ (void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
return_VOID;
}
@@ -171,7 +167,7 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
acpi_owner_id owner_id = 0;
struct acpi_namespace_node *child_node;
- ACPI_FUNCTION_TRACE("ns_install_node");
+ ACPI_FUNCTION_TRACE(ns_install_node);
/*
* Get the owner ID from the Walk state
@@ -216,14 +212,6 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
acpi_ut_get_type_name(parent_node->type),
parent_node));
- /*
- * Increment the reference count(s) of all parents up to
- * the root!
- */
- while ((node = acpi_ns_get_parent_node(node)) != NULL) {
- node->reference_count++;
- }
-
return_VOID;
}
@@ -244,10 +232,9 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
{
struct acpi_namespace_node *child_node;
struct acpi_namespace_node *next_node;
- struct acpi_namespace_node *node;
u8 flags;
- ACPI_FUNCTION_TRACE_PTR("ns_delete_children", parent_node);
+ ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
if (!parent_node) {
return_VOID;
@@ -264,6 +251,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
* Deallocate all children at this level
*/
do {
+
/* Get the things we need */
next_node = child_node->peer;
@@ -289,26 +277,10 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
*/
acpi_ns_detach_object(child_node);
- /*
- * Decrement the reference count(s) of all parents up to
- * the root! (counts were incremented when the node was created)
- */
- node = child_node;
- while ((node = acpi_ns_get_parent_node(node)) != NULL) {
- node->reference_count--;
- }
-
- /* There should be only one reference remaining on this node */
-
- if (child_node->reference_count != 1) {
- ACPI_WARNING((AE_INFO,
- "Existing references (%d) on node being deleted (%p)",
- child_node->reference_count, child_node));
- }
-
/* Now we can delete the node */
- ACPI_MEM_FREE(child_node);
+ (void)acpi_os_release_object(acpi_gbl_namespace_cache,
+ child_node);
/* And move on to the next child in the list */
@@ -341,7 +313,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
struct acpi_namespace_node *child_node = NULL;
u32 level = 1;
- ACPI_FUNCTION_TRACE("ns_delete_namespace_subtree");
+ ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
if (!parent_node) {
return_VOID;
@@ -352,11 +324,14 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
* to where we started.
*/
while (level > 0) {
+
/* Get the next node in this scope (NULL if none) */
- child_node = acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
- child_node);
+ child_node =
+ acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+ child_node);
if (child_node) {
+
/* Found a child node - detach any attached object */
acpi_ns_detach_object(child_node);
@@ -401,55 +376,6 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
/*******************************************************************************
*
- * FUNCTION: acpi_ns_remove_reference
- *
- * PARAMETERS: Node - Named node whose reference count is to be
- * decremented
- *
- * RETURN: None.
- *
- * DESCRIPTION: Remove a Node reference. Decrements the reference count
- * of all parent Nodes up to the root. Any node along
- * the way that reaches zero references is freed.
- *
- ******************************************************************************/
-
-static void acpi_ns_remove_reference(struct acpi_namespace_node *node)
-{
- struct acpi_namespace_node *parent_node;
- struct acpi_namespace_node *this_node;
-
- ACPI_FUNCTION_ENTRY();
-
- /*
- * Decrement the reference count(s) of this node and all
- * nodes up to the root, Delete anything with zero remaining references.
- */
- this_node = node;
- while (this_node) {
- /* Prepare to move up to parent */
-
- parent_node = acpi_ns_get_parent_node(this_node);
-
- /* Decrement the reference count on this node */
-
- this_node->reference_count--;
-
- /* Delete the node if no more references */
-
- if (!this_node->reference_count) {
- /* Delete all children and delete the node */
-
- acpi_ns_delete_children(this_node);
- acpi_ns_delete_node(this_node);
- }
-
- this_node = parent_node;
- }
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_delete_namespace_by_owner
*
* PARAMETERS: owner_id - All nodes with this owner will be deleted
@@ -469,15 +395,15 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
u32 level;
struct acpi_namespace_node *parent_node;
- ACPI_FUNCTION_TRACE_U32("ns_delete_namespace_by_owner", owner_id);
+ ACPI_FUNCTION_TRACE_U32(ns_delete_namespace_by_owner, owner_id);
if (owner_id == 0) {
return_VOID;
}
+ deletion_node = NULL;
parent_node = acpi_gbl_root_node;
child_node = NULL;
- deletion_node = NULL;
level = 1;
/*
@@ -494,12 +420,14 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
child_node);
if (deletion_node) {
- acpi_ns_remove_reference(deletion_node);
+ acpi_ns_delete_children(deletion_node);
+ acpi_ns_delete_node(deletion_node);
deletion_node = NULL;
}
if (child_node) {
if (child_node->owner_id == owner_id) {
+
/* Found a matching child node - detach any attached object */
acpi_ns_detach_object(child_node);
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index a2807317a84b..d72df66aa965 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -75,7 +75,7 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
{
acpi_native_uint i;
- ACPI_FUNCTION_NAME("ns_print_pathname");
+ ACPI_FUNCTION_NAME(ns_print_pathname);
if (!(acpi_dbg_level & ACPI_LV_NAMES)
|| !(acpi_dbg_layer & ACPI_NAMESPACE)) {
@@ -123,7 +123,7 @@ void
acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
{
- ACPI_FUNCTION_TRACE("ns_dump_pathname");
+ ACPI_FUNCTION_TRACE(ns_dump_pathname);
/* Do this only if the requested debug level and component are enabled */
@@ -167,7 +167,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
u32 dbg_level;
u32 i;
- ACPI_FUNCTION_NAME("ns_dump_one_object");
+ ACPI_FUNCTION_NAME(ns_dump_one_object);
/* Is output enabled? */
@@ -191,6 +191,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
}
if (!(info->display_type & ACPI_DISPLAY_SHORT)) {
+
/* Indent the object according to the level */
acpi_os_printf("%2d%*s", (u32) level - 1, (int)level * 2, " ");
@@ -203,6 +204,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
}
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
+ this_node->name.integer =
+ acpi_ut_repair_name(this_node->name.integer);
+
ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
this_node->name.integer));
}
@@ -226,6 +230,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
case ACPI_DISPLAY_SUMMARY:
if (!obj_desc) {
+
/* No attached object, we are done */
acpi_os_printf("\n");
@@ -419,6 +424,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
acpi_os_printf("O:%p", obj_desc);
if (!obj_desc) {
+
/* No attached object, we are done */
acpi_os_printf("\n");
@@ -669,7 +675,7 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
{
acpi_handle search_handle = search_base;
- ACPI_FUNCTION_TRACE("ns_dump_tables");
+ ACPI_FUNCTION_TRACE(ns_dump_tables);
if (!acpi_gbl_root_node) {
/*
@@ -682,6 +688,7 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
}
if (ACPI_NS_ALL == search_base) {
+
/* Entire namespace */
search_handle = acpi_gbl_root_node;
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
index aff899a935e3..c6bf5d30fca3 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/namespace/nsdumpdv.c
@@ -74,7 +74,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
acpi_status status;
u32 i;
- ACPI_FUNCTION_NAME("ns_dump_one_device");
+ ACPI_FUNCTION_NAME(ns_dump_one_device);
status =
acpi_ns_dump_one_object(obj_handle, level, context, return_value);
@@ -92,7 +92,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
info->hardware_id.value,
ACPI_FORMAT_UINT64(info->address),
info->current_status));
- ACPI_MEM_FREE(info);
+ ACPI_FREE(info);
}
return (status);
@@ -115,7 +115,7 @@ void acpi_ns_dump_root_devices(void)
acpi_handle sys_bus_handle;
acpi_status status;
- ACPI_FUNCTION_NAME("ns_dump_root_devices");
+ ACPI_FUNCTION_NAME(ns_dump_root_devices);
/* Only dump the table if tracing is enabled */
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 19d7b94d40c3..4b0a4a8c9843 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -1,7 +1,6 @@
/*******************************************************************************
*
- * Module Name: nseval - Object evaluation interfaces -- includes control
- * method lookup and execution.
+ * Module Name: nseval - Object evaluation, includes control method execution
*
******************************************************************************/
@@ -50,196 +49,14 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nseval")
-/* Local prototypes */
-static acpi_status
-acpi_ns_execute_control_method(struct acpi_parameter_info *info);
-
-static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_evaluate_relative
- *
- * PARAMETERS: Pathname - Name of method to execute, If NULL, the
- * handle is the object to execute
- * Info - Method info block, contains:
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- * Params - List of parameters to pass to the method,
- * terminated by NULL. Params itself may be
- * NULL if no parameters are being passed.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Evaluate the object or find and execute the requested method
- *
- * MUTEX: Locks Namespace
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_evaluate_relative(char *pathname, struct acpi_parameter_info *info)
-{
- acpi_status status;
- struct acpi_namespace_node *node = NULL;
- union acpi_generic_state *scope_info;
- char *internal_path = NULL;
-
- ACPI_FUNCTION_TRACE("ns_evaluate_relative");
-
- /*
- * Must have a valid object handle
- */
- if (!info || !info->node) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- /* Build an internal name string for the method */
-
- status = acpi_ns_internalize_name(pathname, &internal_path);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- scope_info = acpi_ut_create_generic_state();
- if (!scope_info) {
- goto cleanup1;
- }
-
- /* Get the prefix handle and Node */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- info->node = acpi_ns_map_handle_to_node(info->node);
- if (!info->node) {
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- status = AE_BAD_PARAMETER;
- goto cleanup;
- }
-
- /* Lookup the name in the namespace */
-
- scope_info->scope.node = info->node;
- status = acpi_ns_lookup(scope_info, internal_path, ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
- &node);
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Object [%s] not found [%s]\n",
- pathname, acpi_format_exception(status)));
- goto cleanup;
- }
-
- /*
- * Now that we have a handle to the object, we can attempt to evaluate it.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n",
- pathname, node, acpi_ns_get_attached_object(node)));
-
- info->node = node;
- status = acpi_ns_evaluate_by_handle(info);
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "*** Completed eval of object %s ***\n", pathname));
-
- cleanup:
- acpi_ut_delete_generic_state(scope_info);
-
- cleanup1:
- ACPI_MEM_FREE(internal_path);
- return_ACPI_STATUS(status);
-}
-
/*******************************************************************************
*
- * FUNCTION: acpi_ns_evaluate_by_name
+ * FUNCTION: acpi_ns_evaluate
*
- * PARAMETERS: Pathname - Fully qualified pathname to the object
- * Info - Method info block, contains:
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- * Params - List of parameters to pass to the method,
- * terminated by NULL. Params itself may be
- * NULL if no parameters are being passed.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Evaluate the object or rind and execute the requested method
- * passing the given parameters
- *
- * MUTEX: Locks Namespace
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info)
-{
- acpi_status status;
- char *internal_path = NULL;
-
- ACPI_FUNCTION_TRACE("ns_evaluate_by_name");
-
- /* Build an internal name string for the method */
-
- status = acpi_ns_internalize_name(pathname, &internal_path);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- /* Lookup the name in the namespace */
-
- status = acpi_ns_lookup(NULL, internal_path, ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
- &info->node);
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "Object at [%s] was not found, status=%.4X\n",
- pathname, status));
- goto cleanup;
- }
-
- /*
- * Now that we have a handle to the object, we can attempt to evaluate it.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n",
- pathname, info->node,
- acpi_ns_get_attached_object(info->node)));
-
- status = acpi_ns_evaluate_by_handle(info);
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "*** Completed eval of object %s ***\n", pathname));
-
- cleanup:
-
- /* Cleanup */
-
- if (internal_path) {
- ACPI_MEM_FREE(internal_path);
- }
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_evaluate_by_handle
- *
- * PARAMETERS: Info - Method info block, contains:
- * Node - Method/Object Node to execute
+ * PARAMETERS: Info - Evaluation info block, contains:
+ * prefix_node - Prefix or Method/Object Node to execute
+ * Pathname - Name of method to execute, If NULL, the
+ * Node is the object to execute
* Parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
@@ -248,29 +65,21 @@ acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info)
* parameter_type - Type of Parameter list
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
+ * Flags - ACPI_IGNORE_RETURN_VALUE to delete return
*
* RETURN: Status
*
- * DESCRIPTION: Evaluate object or execute the requested method passing the
- * given parameters
+ * DESCRIPTION: Execute a control method or return the current value of an
+ * ACPI namespace object.
*
- * MUTEX: Locks Namespace
+ * MUTEX: Locks interpreter
*
******************************************************************************/
-
-acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info)
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_evaluate_by_handle");
-
- /* Check if namespace has been initialized */
-
- if (!acpi_gbl_root_node) {
- return_ACPI_STATUS(AE_NO_NAMESPACE);
- }
-
- /* Parameter Validation */
+ ACPI_FUNCTION_TRACE(ns_evaluate);
if (!info) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -280,202 +89,120 @@ acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info)
info->return_object = NULL;
- /* Get the prefix handle and Node */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ /*
+ * Get the actual namespace node for the target object. Handles these cases:
+ *
+ * 1) Null node, Pathname (absolute path)
+ * 2) Node, Pathname (path relative to Node)
+ * 3) Node, Null Pathname
+ */
+ status = acpi_ns_get_node(info->prefix_node, info->pathname,
+ ACPI_NS_NO_UPSEARCH, &info->resolved_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- info->node = acpi_ns_map_handle_to_node(info->node);
- if (!info->node) {
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
/*
* For a method alias, we must grab the actual method node so that proper
* scoping context will be established before execution.
*/
- if (acpi_ns_get_type(info->node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) {
- info->node =
+ if (acpi_ns_get_type(info->resolved_node) ==
+ ACPI_TYPE_LOCAL_METHOD_ALIAS) {
+ info->resolved_node =
ACPI_CAST_PTR(struct acpi_namespace_node,
- info->node->object);
+ info->resolved_node->object);
}
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n", info->pathname,
+ info->resolved_node,
+ acpi_ns_get_attached_object(info->resolved_node)));
+
/*
* Two major cases here:
- * 1) The object is an actual control method -- execute it.
- * 2) The object is not a method -- just return it's current value
*
- * In both cases, the namespace is unlocked by the acpi_ns* procedure
+ * 1) The object is a control method -- execute it
+ * 2) The object is not a method -- just return its current value
*/
- if (acpi_ns_get_type(info->node) == ACPI_TYPE_METHOD) {
- /*
- * Case 1) We have an actual control method to execute
- */
- status = acpi_ns_execute_control_method(info);
- } else {
+ if (acpi_ns_get_type(info->resolved_node) == ACPI_TYPE_METHOD) {
/*
- * Case 2) Object is NOT a method, just return its current value
+ * 1) Object is a control method - execute it
*/
- status = acpi_ns_get_object_value(info);
- }
-
- /*
- * Check if there is a return value on the stack that must be dealt with
- */
- if (status == AE_CTRL_RETURN_VALUE) {
- /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
-
- status = AE_OK;
- }
-
- /*
- * Namespace was unlocked by the handling acpi_ns* function, so we
- * just return
- */
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_execute_control_method
- *
- * PARAMETERS: Info - Method info block, contains:
- * Node - Method Node to execute
- * obj_desc - Method object
- * Parameters - List of parameters to pass to the method,
- * terminated by NULL. Params itself may be
- * NULL if no parameters are being passed.
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- * parameter_type - Type of Parameter list
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Execute the requested method passing the given parameters
- *
- * MUTEX: Assumes namespace is locked
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_execute_control_method(struct acpi_parameter_info *info)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE("ns_execute_control_method");
-
- /* Verify that there is a method associated with this object */
-
- info->obj_desc = acpi_ns_get_attached_object(info->node);
- if (!info->obj_desc) {
- ACPI_ERROR((AE_INFO, "No attached method object"));
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_NULL_OBJECT);
- }
-
- ACPI_DUMP_PATHNAME(info->node, "Execute Method:",
- ACPI_LV_INFO, _COMPONENT);
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Method at AML address %p Length %X\n",
- info->obj_desc->method.aml_start + 1,
- info->obj_desc->method.aml_length - 1));
-
- /*
- * Unlock the namespace before execution. This allows namespace access
- * via the external Acpi* interfaces while a method is being executed.
- * However, any namespace deletion must acquire both the namespace and
- * interpreter locks to ensure that no thread is using the portion of the
- * namespace that is being deleted.
- */
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ /* Verify that there is a method object associated with this node */
- /*
- * Execute the method via the interpreter. The interpreter is locked
- * here before calling into the AML parser
- */
- status = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ info->obj_desc =
+ acpi_ns_get_attached_object(info->resolved_node);
+ if (!info->obj_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Control method has no attached sub-object"));
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
- status = acpi_ps_execute_method(info);
- acpi_ex_exit_interpreter();
+ ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
+ ACPI_LV_INFO, _COMPONENT);
- return_ACPI_STATUS(status);
-}
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Method at AML address %p Length %X\n",
+ info->obj_desc->method.aml_start + 1,
+ info->obj_desc->method.aml_length - 1));
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_get_object_value
- *
- * PARAMETERS: Info - Method info block, contains:
- * Node - Object's NS node
- * return_object - Where to put object value (if
- * any). If NULL, no value is returned.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Return the current value of the object
- *
- * MUTEX: Assumes namespace is locked, leaves namespace unlocked
- *
- ******************************************************************************/
+ /*
+ * Any namespace deletion must acquire both the namespace and
+ * interpreter locks to ensure that no thread is using the portion of
+ * the namespace that is being deleted.
+ *
+ * Execute the method via the interpreter. The interpreter is locked
+ * here before calling into the AML parser
+ */
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
-static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info)
-{
- acpi_status status = AE_OK;
- struct acpi_namespace_node *resolved_node = info->node;
+ status = acpi_ps_execute_method(info);
+ acpi_ex_exit_interpreter();
+ } else {
+ /*
+ * 2) Object is not a method, return its current value
+ */
- ACPI_FUNCTION_TRACE("ns_get_object_value");
+ /*
+ * Objects require additional resolution steps (e.g., the Node may be
+ * a field that must be read, etc.) -- we can't just grab the object
+ * out of the node.
+ *
+ * Use resolve_node_to_value() to get the associated value.
+ *
+ * NOTE: we can get away with passing in NULL for a walk state because
+ * resolved_node is guaranteed to not be a reference to either a method
+ * local or a method argument (because this interface is never called
+ * from a running method.)
+ *
+ * Even though we do not directly invoke the interpreter for object
+ * resolution, we must lock it because we could access an opregion.
+ * The opregion access code assumes that the interpreter is locked.
+ */
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
- /*
- * Objects require additional resolution steps (e.g., the Node may be a
- * field that must be read, etc.) -- we can't just grab the object out of
- * the node.
- */
+ /* Unusual interface: the resolved value is returned through the node pointer */
- /*
- * Use resolve_node_to_value() to get the associated value. This call always
- * deletes obj_desc (allocated above).
- *
- * NOTE: we can get away with passing in NULL for a walk state because
- * obj_desc is guaranteed to not be a reference to either a method local or
- * a method argument (because this interface can only be called from the
- * acpi_evaluate external interface, never called from a running method.)
- *
- * Even though we do not directly invoke the interpreter for this, we must
- * enter it because we could access an opregion. The opregion access code
- * assumes that the interpreter is locked.
- *
- * We must release the namespace lock before entering the intepreter.
- */
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ status =
+ acpi_ex_resolve_node_to_value(&info->resolved_node, NULL);
+ acpi_ex_exit_interpreter();
- status = acpi_ex_enter_interpreter();
- if (ACPI_SUCCESS(status)) {
- status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
/*
* If acpi_ex_resolve_node_to_value() succeeded, the return value was placed
* in resolved_node.
*/
- acpi_ex_exit_interpreter();
-
if (ACPI_SUCCESS(status)) {
status = AE_CTRL_RETURN_VALUE;
- info->return_object = ACPI_CAST_PTR
- (union acpi_operand_object, resolved_node);
+ info->return_object =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ info->resolved_node);
+
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Returning object %p [%s]\n",
info->return_object,
@@ -484,7 +211,30 @@ static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info)
}
}
- /* Namespace is unlocked */
+ /*
+ * Check if there is a return value that must be dealt with
+ */
+ if (status == AE_CTRL_RETURN_VALUE) {
+
+ /* If caller does not want the return value, delete it */
+ if (info->flags & ACPI_IGNORE_RETURN_VALUE) {
+ acpi_ut_remove_reference(info->return_object);
+ info->return_object = NULL;
+ }
+
+ /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
+
+ status = AE_OK;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "*** Completed evaluation of object %s ***\n",
+ info->pathname));
+
+ /*
+ * The interpreter was exited above in both the method and non-method
+ * paths, so we can simply return
+ */
return_ACPI_STATUS(status);
}
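Illustrative note (not part of the patch): the hunk above replaces the old evaluate-by-name/relative/handle split with a single acpi_ns_evaluate() driven by a caller-filled struct acpi_evaluate_info. A minimal sketch of that calling pattern, using only fields visible in this diff, might look as follows; the helper name is hypothetical.

/*
 * Sketch only: evaluate an object relative to a namespace node and
 * discard any return object, mirroring the info-block usage above.
 */
static acpi_status example_evaluate_no_return(struct acpi_namespace_node *prefix_node,
					      char *pathname)
{
	struct acpi_evaluate_info *info;
	acpi_status status;

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		return (AE_NO_MEMORY);
	}

	info->prefix_node = prefix_node;	/* Case 2: Node + relative path */
	info->pathname = pathname;
	info->parameters = NULL;		/* No arguments passed */
	info->parameter_type = ACPI_PARAM_ARGS;
	info->flags = ACPI_IGNORE_RETURN_VALUE;	/* Any return object is deleted */

	status = acpi_ns_evaluate(info);

	ACPI_FREE(info);
	return (status);
}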
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index 9f929e479fd8..aec8488c0019 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -58,6 +58,10 @@ static acpi_status
acpi_ns_init_one_device(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_initialize_objects
@@ -76,7 +80,7 @@ acpi_status acpi_ns_initialize_objects(void)
acpi_status status;
struct acpi_init_walk_info info;
- ACPI_FUNCTION_TRACE("ns_initialize_objects");
+ ACPI_FUNCTION_TRACE(ns_initialize_objects);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
@@ -93,7 +97,7 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_UINT32_MAX, acpi_ns_init_one_object,
&info, NULL);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
+ ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
@@ -133,7 +137,7 @@ acpi_status acpi_ns_initialize_devices(void)
acpi_status status;
struct acpi_device_walk_info info;
- ACPI_FUNCTION_TRACE("ns_initialize_devices");
+ ACPI_FUNCTION_TRACE(ns_initialize_devices);
/* Init counters */
@@ -142,30 +146,46 @@ acpi_status acpi_ns_initialize_devices(void)
info.num_INI = 0;
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Executing all Device _STA and_INI methods:"));
+ "Initializing Device/Processor/Thermal objects by executing _INI methods:"));
+
+ /* Tree analysis: find all subtrees that contain _INI methods */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_find_ini_methods, &info, NULL);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto error_exit;
}
- /* Walk namespace for all objects */
+ /* Allocate the evaluation information block */
+
+ info.evaluate_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info.evaluate_info) {
+ status = AE_NO_MEMORY;
+ goto error_exit;
+ }
+
+ /* Walk namespace to execute all _INIs on present devices */
status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, TRUE,
+ ACPI_UINT32_MAX, FALSE,
acpi_ns_init_one_device, &info, NULL);
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
+ ACPI_FREE(info.evaluate_info);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
+ goto error_exit;
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\n%hd Devices found - executed %hd _STA, %hd _INI methods\n",
- info.device_count, info.num_STA, info.num_INI));
+ "\nExecuted %hd _INI methods requiring %hd _STA executions (examined %hd objects)\n",
+ info.num_INI, info.num_STA, info.device_count));
return_ACPI_STATUS(status);
+
+ error_exit:
+ ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -200,7 +220,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
(struct acpi_namespace_node *)obj_handle;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_NAME("ns_init_one_object");
+ ACPI_FUNCTION_NAME(ns_init_one_object);
info->object_count++;
@@ -311,6 +331,72 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_find_ini_methods
+ *
+ * PARAMETERS: acpi_walk_callback
+ *
+ * RETURN: acpi_status
+ *
+ * DESCRIPTION: Called during namespace walk. Finds objects named _INI under
+ * device/processor/thermal objects, and marks the entire subtree
+ * with a SUBTREE_HAS_INI flag. This flag is used during the
+ * subsequent device initialization walk to avoid entire subtrees
+ * that do not contain an _INI.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ struct acpi_device_walk_info *info =
+ ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+ struct acpi_namespace_node *node;
+ struct acpi_namespace_node *parent_node;
+
+ /* Keep count of device/processor/thermal objects */
+
+ node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ if ((node->type == ACPI_TYPE_DEVICE) ||
+ (node->type == ACPI_TYPE_PROCESSOR) ||
+ (node->type == ACPI_TYPE_THERMAL)) {
+ info->device_count++;
+ return (AE_OK);
+ }
+
+ /* We are only looking for methods named _INI */
+
+ if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
+ return (AE_OK);
+ }
+
+ /*
+ * The only _INI methods that we care about are those that are
+ * present under Device, Processor, and Thermal objects.
+ */
+ parent_node = acpi_ns_get_parent_node(node);
+ switch (parent_node->type) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* Mark parent and bubble up the INI present flag to the root */
+
+ while (parent_node) {
+ parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
+ parent_node = acpi_ns_get_parent_node(parent_node);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_init_one_device
*
* PARAMETERS: acpi_walk_callback
@@ -327,119 +413,165 @@ static acpi_status
acpi_ns_init_one_device(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value)
{
- struct acpi_device_walk_info *info =
- (struct acpi_device_walk_info *)context;
- struct acpi_parameter_info pinfo;
+ struct acpi_device_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+ struct acpi_evaluate_info *info = walk_info->evaluate_info;
u32 flags;
acpi_status status;
- struct acpi_namespace_node *ini_node;
struct acpi_namespace_node *device_node;
- ACPI_FUNCTION_TRACE("ns_init_one_device");
+ ACPI_FUNCTION_TRACE(ns_init_one_device);
- device_node = acpi_ns_map_handle_to_node(obj_handle);
- if (!device_node) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ /* We are interested in Devices, Processors and thermal_zones only */
- /*
- * We will run _STA/_INI on Devices, Processors and thermal_zones only
- */
+ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
if ((device_node->type != ACPI_TYPE_DEVICE) &&
(device_node->type != ACPI_TYPE_PROCESSOR) &&
(device_node->type != ACPI_TYPE_THERMAL)) {
return_ACPI_STATUS(AE_OK);
}
- if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
- (!(acpi_dbg_level & ACPI_LV_INFO))) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
- }
-
- info->device_count++;
-
/*
- * Check if the _INI method exists for this device -
- * if _INI does not exist, there is no need to run _STA
- * No _INI means device requires no initialization
+ * Because of an earlier namespace analysis, all subtrees that contain an
+ * _INI method are tagged.
+ *
+ * If this device subtree does not contain any _INI methods, we
+ * can exit now and stop traversing this entire subtree.
*/
- status = acpi_ns_search_node(*ACPI_CAST_PTR(u32, METHOD_NAME__INI),
- device_node, ACPI_TYPE_METHOD, &ini_node);
- if (ACPI_FAILURE(status)) {
- /* No _INI method found - move on to next device */
-
- return_ACPI_STATUS(AE_OK);
+ if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) {
+ return_ACPI_STATUS(AE_CTRL_DEPTH);
}
/*
- * Run _STA to determine if we can run _INI on the device -
- * the device must be present before _INI can be run.
- * However, _STA is not required - assume device present if no _STA
+ * Run _STA to determine if this device is present and functioning. We
+ * must know this information for two important reasons (from ACPI spec):
+ *
+ * 1) We can only run _INI if the device is present.
+ * 2) We must abort the device tree walk on this subtree if the device is
+ * not present and is not functional (we will not examine the children)
+ *
+ * The _STA method is not required to be present under the device, we
+ * assume the device is present if _STA does not exist.
*/
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_METHOD,
- device_node,
- METHOD_NAME__STA));
-
- pinfo.node = device_node;
- pinfo.parameters = NULL;
- pinfo.parameter_type = ACPI_PARAM_ARGS;
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node, METHOD_NAME__STA));
- status = acpi_ut_execute_STA(pinfo.node, &flags);
+ status = acpi_ut_execute_STA(device_node, &flags);
if (ACPI_FAILURE(status)) {
+
/* Ignore error and move on to next device */
return_ACPI_STATUS(AE_OK);
}
+ /*
+ * Flags == -1 means that _STA was not found. In this case, we assume that
+ * the device is both present and functional.
+ *
+ * From the ACPI spec, description of _STA:
+ *
+ * "If a device object (including the processor object) does not have an
+ * _STA object, then OSPM assumes that all of the above bits are set (in
+ * other words, the device is present, ..., and functioning)"
+ */
if (flags != ACPI_UINT32_MAX) {
- info->num_STA++;
+ walk_info->num_STA++;
}
+ /*
+ * Examine the PRESENT and FUNCTIONING status bits
+ *
+ * Note: ACPI spec does not seem to specify behavior for the present but
+ * not functioning case, so we assume functioning if present.
+ */
if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
- /* Don't look at children of a not present device */
- return_ACPI_STATUS(AE_CTRL_DEPTH);
+ /* Device is not present, we must examine the Functioning bit */
+
+ if (flags & ACPI_STA_DEVICE_FUNCTIONING) {
+ /*
+ * Device is not present but is "functioning". In this case,
+ * we will not run _INI, but we continue to examine the children
+ * of this device.
+ *
+ * From the ACPI spec, description of _STA: (Note - no mention
+ * of whether to run _INI or not on the device in question)
+ *
+ * "_STA may return bit 0 clear (not present) with bit 3 set
+ * (device is functional). This case is used to indicate a valid
+ * device for which no device driver should be loaded (for example,
+ * a bridge device.) Children of this device may be present and
+ * valid. OSPM should continue enumeration below a device whose
+ * _STA returns this bit combination"
+ */
+ return_ACPI_STATUS(AE_OK);
+ } else {
+ /*
+ * Device is not present and is not functioning. We must abort the
+ * walk of this subtree immediately -- don't look at the children
+ * of such a device.
+ *
+ * From the ACPI spec, description of _INI:
+ *
+ * "If the _STA method indicates that the device is not present,
+ * OSPM will not run the _INI and will not examine the children
+ * of the device for _INI methods"
+ */
+ return_ACPI_STATUS(AE_CTRL_DEPTH);
+ }
}
/*
- * The device is present and _INI exists. Run the _INI method.
- * (We already have the _INI node from above)
+ * The device is present or is assumed present if no _STA exists.
+ * Run the _INI if it exists (not required to exist)
+ *
+ * Note: We know there is an _INI within this subtree, but it may not be
+ * under this particular device, it may be lower in the branch.
*/
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_METHOD,
- pinfo.node,
- METHOD_NAME__INI));
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
+
+ info->prefix_node = device_node;
+ info->pathname = METHOD_NAME__INI;
+ info->parameters = NULL;
+ info->parameter_type = ACPI_PARAM_ARGS;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ if (ACPI_SUCCESS(status)) {
+ walk_info->num_INI++;
+
+ if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
+ (!(acpi_dbg_level & ACPI_LV_INFO))) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
+ }
+ }
+#ifdef ACPI_DEBUG_OUTPUT
+ else if (status != AE_NOT_FOUND) {
- pinfo.node = ini_node;
- status = acpi_ns_evaluate_by_handle(&pinfo);
- if (ACPI_FAILURE(status)) {
/* Ignore error and move on to next device */
-#ifdef ACPI_DEBUG_OUTPUT
- char *scope_name = acpi_ns_get_external_pathname(ini_node);
-
- ACPI_WARNING((AE_INFO, "%s._INI failed: %s",
- scope_name, acpi_format_exception(status)));
+ char *scope_name =
+ acpi_ns_get_external_pathname(info->resolved_node);
- ACPI_MEM_FREE(scope_name);
+ ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
+ scope_name));
+ ACPI_FREE(scope_name);
+ }
#endif
- } else {
- /* Delete any return object (especially if implicit_return is enabled) */
- if (pinfo.return_object) {
- acpi_ut_remove_reference(pinfo.return_object);
- }
+ /* Ignore errors from above */
- /* Count of successful INIs */
-
- info->num_INI++;
- }
+ status = AE_OK;
+ /*
+ * The _INI method has been run if present; call the Global Initialization
+ * Handler for this device.
+ */
if (acpi_gbl_init_handler) {
- /* External initialization handler is present, call it */
-
status =
- acpi_gbl_init_handler(pinfo.node, ACPI_INIT_DEVICE_INI);
+ acpi_gbl_init_handler(device_node, ACPI_INIT_DEVICE_INI);
}
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
}
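Illustrative note (not part of the patch): the device-initialization rework above is a two-pass scheme - a first walk tags every subtree containing an _INI method with ANOBJ_SUBTREE_HAS_INI, and the second walk prunes untagged subtrees and consults _STA before running _INI. The per-node decision can be restated as the sketch below; the function name is hypothetical and the logic simply condenses acpi_ns_init_one_device() above.

/*
 * Sketch only: the pruning decision for one Device/Processor/Thermal
 * node during the second (_INI execution) walk.
 */
static acpi_status example_ini_walk_decision(struct acpi_namespace_node *device_node,
					     u32 sta_flags)
{
	/* Pass 1 marked every subtree that contains at least one _INI */

	if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) {
		return (AE_CTRL_DEPTH);	/* No _INI below: skip whole subtree */
	}

	if (!(sta_flags & ACPI_STA_DEVICE_PRESENT)) {
		if (sta_flags & ACPI_STA_DEVICE_FUNCTIONING) {
			return (AE_OK);	/* Not present but functional: keep walking children */
		}
		return (AE_CTRL_DEPTH);	/* Not present, not functional: prune subtree */
	}

	return (AE_OK);			/* Present (or no _STA): run _INI if it exists */
}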
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index 4e0b0524c188..fe75d888e183 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -77,13 +77,14 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_load_table");
+ ACPI_FUNCTION_TRACE(ns_load_table);
/* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */
if (!
(acpi_gbl_table_data[table_desc->type].
flags & ACPI_TABLE_EXECUTABLE)) {
+
/* Just ignore this table */
return_ACPI_STATUS(AE_OK);
@@ -168,7 +169,7 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
acpi_status status;
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("ns_load_table_by_type");
+ ACPI_FUNCTION_TRACE(ns_load_table_by_type);
status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (ACPI_FAILURE(status)) {
@@ -180,11 +181,11 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
* DSDT (one), SSDT/PSDT (multiple)
*/
switch (table_type) {
- case ACPI_TABLE_DSDT:
+ case ACPI_TABLE_ID_DSDT:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace load: DSDT\n"));
- table_desc = acpi_gbl_table_lists[ACPI_TABLE_DSDT].next;
+ table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_DSDT].next;
/* If table already loaded into namespace, just return */
@@ -200,8 +201,8 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
}
break;
- case ACPI_TABLE_SSDT:
- case ACPI_TABLE_PSDT:
+ case ACPI_TABLE_ID_SSDT:
+ case ACPI_TABLE_ID_PSDT:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Namespace load: %d SSDT or PSDTs\n",
@@ -258,7 +259,7 @@ acpi_status acpi_ns_load_namespace(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_load_name_space");
+ ACPI_FUNCTION_TRACE(acpi_load_name_space);
/* There must be at least a DSDT installed */
@@ -271,15 +272,15 @@ acpi_status acpi_ns_load_namespace(void)
* Load the namespace. The DSDT is required,
* but the SSDT and PSDT tables are optional.
*/
- status = acpi_ns_load_table_by_type(ACPI_TABLE_DSDT);
+ status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Ignore exceptions from these */
- (void)acpi_ns_load_table_by_type(ACPI_TABLE_SSDT);
- (void)acpi_ns_load_table_by_type(ACPI_TABLE_PSDT);
+ (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_SSDT);
+ (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_PSDT);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"ACPI Namespace successfully loaded at root %p\n",
@@ -314,7 +315,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
acpi_handle dummy;
u32 level;
- ACPI_FUNCTION_TRACE("ns_delete_subtree");
+ ACPI_FUNCTION_TRACE(ns_delete_subtree);
parent_handle = start_handle;
child_handle = NULL;
@@ -325,6 +326,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
* to where we started.
*/
while (level > 0) {
+
/* Attempt to get the next object in this scope */
status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle,
@@ -335,6 +337,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
/* Did we get a new object? */
if (ACPI_SUCCESS(status)) {
+
/* Check if this object has any children */
if (ACPI_SUCCESS
@@ -392,7 +395,7 @@ acpi_status acpi_ns_unload_namespace(acpi_handle handle)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_unload_name_space");
+ ACPI_FUNCTION_TRACE(ns_unload_name_space);
/* Parameter validation */
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 639f653b4b6b..97b8332c9746 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -48,11 +48,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
-/* Local prototypes */
-static void
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
- acpi_size size, char *name_buffer);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_build_external_path
@@ -67,8 +62,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
* DESCRIPTION: Generate a full pathname
*
******************************************************************************/
-
-static void
+void
acpi_ns_build_external_path(struct acpi_namespace_node *node,
acpi_size size, char *name_buffer)
{
@@ -138,7 +132,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
char *name_buffer;
acpi_size size;
- ACPI_FUNCTION_TRACE_PTR("ns_get_external_pathname", node);
+ ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
/* Calculate required buffer size based on depth below root */
@@ -146,7 +140,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
/* Allocate a buffer to be returned to caller */
- name_buffer = ACPI_MEM_CALLOCATE(size);
+ name_buffer = ACPI_ALLOCATE_ZEROED(size);
if (!name_buffer) {
ACPI_ERROR((AE_INFO, "Allocation failure"));
return_PTR(NULL);
@@ -219,7 +213,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
struct acpi_namespace_node *node;
acpi_size required_size;
- ACPI_FUNCTION_TRACE_PTR("ns_handle_to_pathname", target_handle);
+ ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
node = acpi_ns_map_handle_to_node(target_handle);
if (!node) {
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
index 10ae6292bca4..aabe8794b908 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/namespace/nsobject.c
@@ -76,19 +76,21 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
union acpi_operand_object *last_obj_desc;
acpi_object_type object_type = ACPI_TYPE_ANY;
- ACPI_FUNCTION_TRACE("ns_attach_object");
+ ACPI_FUNCTION_TRACE(ns_attach_object);
/*
* Parameter validation
*/
if (!node) {
+
/* Invalid handle */
- ACPI_ERROR((AE_INFO, "Null named_obj handle"));
+ ACPI_ERROR((AE_INFO, "Null NamedObj handle"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (!object && (ACPI_TYPE_ANY != type)) {
+
/* Null object */
ACPI_ERROR((AE_INFO,
@@ -97,6 +99,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
}
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+
/* Not a name handle */
ACPI_ERROR((AE_INFO, "Invalid handle %p [%s]",
@@ -108,7 +111,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
if (node->object == object) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Obj %p already installed in name_obj %p\n",
+ "Obj %p already installed in NameObj %p\n",
object, node));
return_ACPI_STATUS(AE_OK);
@@ -201,7 +204,7 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
{
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ns_detach_object");
+ ACPI_FUNCTION_TRACE(ns_detach_object);
obj_desc = node->object;
@@ -252,7 +255,7 @@ union acpi_operand_object *acpi_ns_get_attached_object(struct
acpi_namespace_node
*node)
{
- ACPI_FUNCTION_TRACE_PTR("ns_get_attached_object", node);
+ ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node);
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node ptr"));
@@ -287,7 +290,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
acpi_operand_object
*obj_desc)
{
- ACPI_FUNCTION_TRACE_PTR("ns_get_secondary_object", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ns_get_secondary_object, obj_desc);
if ((!obj_desc) ||
(ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) ||
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
index 232be4303653..155505a4ef69 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/namespace/nsparse.c
@@ -62,13 +62,13 @@ ACPI_MODULE_NAME("nsparse")
*
******************************************************************************/
acpi_status
-acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc * table_desc)
+acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
{
union acpi_parse_object *parse_root;
acpi_status status;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ns_one_complete_parse");
+ ACPI_FUNCTION_TRACE(ns_one_complete_parse);
/* Create and init a Root Node */
@@ -124,7 +124,7 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_parse_table");
+ ACPI_FUNCTION_TRACE(ns_parse_table);
/*
* AML Parse, pass 1
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index d64b78952f24..500e2bbcfaf7 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -56,16 +56,16 @@ acpi_ns_search_parent_tree(u32 target_name,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_search_node
+ * FUNCTION: acpi_ns_search_one_scope
*
* PARAMETERS: target_name - Ascii ACPI name to search for
- * Node - Starting node where search will begin
+ * parent_node - Starting node where search will begin
* Type - Object type to match
* return_node - Where the matched Named obj is returned
*
* RETURN: Status
*
- * DESCRIPTION: Search a single level of the namespace. Performs a
+ * DESCRIPTION: Search a single level of the namespace. Performs a
* simple search of the specified level, and does not add
* entries or search parents.
*
@@ -75,35 +75,40 @@ acpi_ns_search_parent_tree(u32 target_name,
*
* All namespace searching is linear in this implementation, but
* could be easily modified to support any improved search
- * algorithm. However, the linear search was chosen for simplicity
+ * algorithm. However, the linear search was chosen for simplicity
* and because the trees are small and the other interpreter
* execution overhead is relatively high.
*
+ * Note: CPU execution analysis has shown that the AML interpreter spends
+ * a very small percentage of its time searching the namespace. Therefore,
+ * the linear search is sufficient; there appears to be little value in
+ * improving the search algorithm.
+ *
******************************************************************************/
acpi_status
-acpi_ns_search_node(u32 target_name,
- struct acpi_namespace_node *node,
- acpi_object_type type,
- struct acpi_namespace_node **return_node)
+acpi_ns_search_one_scope(u32 target_name,
+ struct acpi_namespace_node *parent_node,
+ acpi_object_type type,
+ struct acpi_namespace_node **return_node)
{
- struct acpi_namespace_node *next_node;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("ns_search_node");
+ ACPI_FUNCTION_TRACE(ns_search_one_scope);
#ifdef ACPI_DEBUG_OUTPUT
if (ACPI_LV_NAMES & acpi_dbg_level) {
char *scope_name;
- scope_name = acpi_ns_get_external_pathname(node);
+ scope_name = acpi_ns_get_external_pathname(parent_node);
if (scope_name) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Searching %s (%p) For [%4.4s] (%s)\n",
- scope_name, node, ACPI_CAST_PTR(char,
- &target_name),
+ scope_name, parent_node,
+ ACPI_CAST_PTR(char, &target_name),
acpi_ut_get_type_name(type)));
- ACPI_MEM_FREE(scope_name);
+ ACPI_FREE(scope_name);
}
}
#endif
@@ -112,32 +117,33 @@ acpi_ns_search_node(u32 target_name,
* Search for name at this namespace level, which is to say that we
* must search for the name among the children of this object
*/
- next_node = node->child;
- while (next_node) {
+ node = parent_node->child;
+ while (node) {
+
/* Check for match against the name */
- if (next_node->name.integer == target_name) {
+ if (node->name.integer == target_name) {
+
/* Resolve a control method alias if any */
- if (acpi_ns_get_type(next_node) ==
+ if (acpi_ns_get_type(node) ==
ACPI_TYPE_LOCAL_METHOD_ALIAS) {
- next_node =
+ node =
ACPI_CAST_PTR(struct acpi_namespace_node,
- next_node->object);
+ node->object);
}
- /*
- * Found matching entry.
- */
+ /* Found matching entry */
+
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n",
ACPI_CAST_PTR(char, &target_name),
- acpi_ut_get_type_name(next_node->
- type),
- next_node,
- acpi_ut_get_node_name(node), node));
+ acpi_ut_get_type_name(node->type),
+ node,
+ acpi_ut_get_node_name(parent_node),
+ parent_node));
- *return_node = next_node;
+ *return_node = node;
return_ACPI_STATUS(AE_OK);
}
@@ -145,7 +151,8 @@ acpi_ns_search_node(u32 target_name,
* The last entry in the list points back to the parent,
* so a flag is used to indicate the end-of-list
*/
- if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+ if (node->flags & ANOBJ_END_OF_PEER_LIST) {
+
/* Searched entire list, we are done */
break;
@@ -153,7 +160,7 @@ acpi_ns_search_node(u32 target_name,
/* Didn't match name, move on to the next peer object */
- next_node = next_node->peer;
+ node = node->peer;
}
/* Searched entire namespace level, not found */
@@ -162,7 +169,8 @@ acpi_ns_search_node(u32 target_name,
"Name [%4.4s] (%s) not found in search in scope [%4.4s] %p first child %p\n",
ACPI_CAST_PTR(char, &target_name),
acpi_ut_get_type_name(type),
- acpi_ut_get_node_name(node), node, node->child));
+ acpi_ut_get_node_name(parent_node), parent_node,
+ parent_node->child));
return_ACPI_STATUS(AE_NOT_FOUND);
}
@@ -179,14 +187,14 @@ acpi_ns_search_node(u32 target_name,
* RETURN: Status
*
* DESCRIPTION: Called when a name has not been found in the current namespace
- * level. Before adding it or giving up, ACPI scope rules require
+ * level. Before adding it or giving up, ACPI scope rules require
* searching enclosing scopes in cases identified by acpi_ns_local().
*
* "A name is located by finding the matching name in the current
* name space, and then in the parent name space. If the parent
* name space does not contain the name, the search continues
* recursively until either the name is found or the name space
- * does not have a parent (the root of the name space). This
+ * does not have a parent (the root of the name space). This
* indicates that the name is not found" (From ACPI Specification,
* section 5.3)
*
@@ -201,7 +209,7 @@ acpi_ns_search_parent_tree(u32 target_name,
acpi_status status;
struct acpi_namespace_node *parent_node;
- ACPI_FUNCTION_TRACE("ns_search_parent_tree");
+ ACPI_FUNCTION_TRACE(ns_search_parent_tree);
parent_node = acpi_ns_get_parent_node(node);
@@ -235,20 +243,19 @@ acpi_ns_search_parent_tree(u32 target_name,
*/
while (parent_node) {
/*
- * Search parent scope. Use TYPE_ANY because we don't care about the
+ * Search parent scope. Use TYPE_ANY because we don't care about the
* object type at this point, we only care about the existence of
- * the actual name we are searching for. Typechecking comes later.
+ * the actual name we are searching for. Typechecking comes later.
*/
- status = acpi_ns_search_node(target_name, parent_node,
+ status =
+ acpi_ns_search_one_scope(target_name, parent_node,
ACPI_TYPE_ANY, return_node);
if (ACPI_SUCCESS(status)) {
return_ACPI_STATUS(status);
}
- /*
- * Not found here, go up another level
- * (until we reach the root)
- */
+ /* Not found here, go up another level (until we reach the root) */
+
parent_node = acpi_ns_get_parent_node(parent_node);
}
@@ -273,7 +280,7 @@ acpi_ns_search_parent_tree(u32 target_name,
* RETURN: Status
*
* DESCRIPTION: Search for a name segment in a single namespace level,
- * optionally adding it if it is not found. If the passed
+ * optionally adding it if it is not found. If the passed
* Type is not Any and the type previously stored in the
* entry was Any (i.e. unknown), update the stored type.
*
@@ -293,29 +300,46 @@ acpi_ns_search_and_enter(u32 target_name,
acpi_status status;
struct acpi_namespace_node *new_node;
- ACPI_FUNCTION_TRACE("ns_search_and_enter");
+ ACPI_FUNCTION_TRACE(ns_search_and_enter);
/* Parameter validation */
if (!node || !target_name || !return_node) {
ACPI_ERROR((AE_INFO,
- "Null param: Node %p Name %X return_node %p",
+ "Null parameter: Node %p Name %X ReturnNode %p",
node, target_name, return_node));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Name must consist of printable characters */
-
+ /*
+ * Name must consist of valid ACPI characters. We will repair the name
+ * rather than abort, but we want all namespace names to be printable,
+ * so a warning message is appropriate.
+ *
+ * This issue came up because there are in fact machines that exhibit
+ * this problem, and we want to be able to enable ACPI support for them,
+ * even though there are a few bad names.
+ */
if (!acpi_ut_valid_acpi_name(target_name)) {
- ACPI_ERROR((AE_INFO, "Bad character in ACPI Name: %X",
- target_name));
- return_ACPI_STATUS(AE_BAD_CHARACTER);
+ target_name = acpi_ut_repair_name(target_name);
+
+ /* Report a warning in strict mode, a debug message otherwise */
+
+ if (!acpi_gbl_enable_interpreter_slack) {
+ ACPI_WARNING((AE_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ ACPI_CAST_PTR(char, &target_name)));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_WARN,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ ACPI_CAST_PTR(char, &target_name)));
+ }
}
/* Try to find the name in the namespace level specified by the caller */
*return_node = ACPI_ENTRY_NOT_FOUND;
- status = acpi_ns_search_node(target_name, node, type, return_node);
+ status = acpi_ns_search_one_scope(target_name, node, type, return_node);
if (status != AE_NOT_FOUND) {
/*
* If we found it AND the request specifies that a find is an error,
@@ -325,18 +349,16 @@ acpi_ns_search_and_enter(u32 target_name,
status = AE_ALREADY_EXISTS;
}
- /*
- * Either found it or there was an error
- * -- finished either way
- */
+ /* Either found it or there was an error: finished either way */
+
return_ACPI_STATUS(status);
}
/*
- * The name was not found. If we are NOT performing the first pass
+ * The name was not found. If we are NOT performing the first pass
* (name entry) of loading the namespace, search the parent tree (all the
* way to the root if necessary.) We don't want to perform the parent
- * search when the namespace is actually being loaded. We want to perform
+ * search when the namespace is actually being loaded. We want to perform
* the search when namespace references are being resolved (load pass 2)
* and during the execution phase.
*/
@@ -354,9 +376,8 @@ acpi_ns_search_and_enter(u32 target_name,
}
}
- /*
- * In execute mode, just search, never add names. Exit now.
- */
+ /* In execute mode, just search, never add names. Exit now */
+
if (interpreter_mode == ACPI_IMODE_EXECUTE) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"%4.4s Not found in %p [Not adding]\n",
@@ -371,11 +392,18 @@ acpi_ns_search_and_enter(u32 target_name,
if (!new_node) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * Node is an object defined by an External() statement
+ */
+ if (flags & ACPI_NS_EXTERNAL) {
+ new_node->flags |= ANOBJ_IS_EXTERNAL;
+ }
+#endif
/* Install the new object into the parent's list of children */
acpi_ns_install_node(walk_state, node, new_node, type);
*return_node = new_node;
-
return_ACPI_STATUS(AE_OK);
}
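Illustrative note (not part of the patch): the renamed acpi_ns_search_one_scope() above performs a plain linear walk of the parent's child list, which is terminated by the ANOBJ_END_OF_PEER_LIST flag rather than a NULL pointer. A stripped-down restatement of that loop, with a hypothetical helper name:

/*
 * Sketch only: linear search of one namespace scope for a 4-byte name.
 */
static struct acpi_namespace_node *example_find_child(struct acpi_namespace_node *parent_node,
						      u32 target_name)
{
	struct acpi_namespace_node *node = parent_node->child;

	while (node) {
		if (node->name.integer == target_name) {
			return (node);	/* Found matching entry */
		}
		if (node->flags & ANOBJ_END_OF_PEER_LIST) {
			break;		/* Last peer points back to the parent */
		}
		node = node->peer;	/* Move on to the next peer */
	}

	return (NULL);			/* Not found in this scope */
}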
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index 3e7cad549a38..aa4e799d9a8c 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -78,15 +78,17 @@ acpi_ns_report_error(char *module_name,
char *internal_name, acpi_status lookup_status)
{
acpi_status status;
+ u32 bad_name;
char *name = NULL;
- acpi_ut_report_error(module_name, line_number);
+ acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
if (lookup_status == AE_BAD_CHARACTER) {
+
/* There is a non-ascii character in the name */
- acpi_os_printf("[0x%4.4X] (NON-ASCII)",
- *(ACPI_CAST_PTR(u32, internal_name)));
+ ACPI_MOVE_32_TO_32(&bad_name, internal_name);
+ acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
} else {
/* Convert path to external format */
@@ -102,7 +104,7 @@ acpi_ns_report_error(char *module_name,
}
if (name) {
- ACPI_MEM_FREE(name);
+ ACPI_FREE(name);
}
}
@@ -137,11 +139,12 @@ acpi_ns_report_method_error(char *module_name,
acpi_status status;
struct acpi_namespace_node *node = prefix_node;
- acpi_ut_report_error(module_name, line_number);
+ acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
if (path) {
- status = acpi_ns_get_node_by_path(path, prefix_node,
- ACPI_NS_NO_UPSEARCH, &node);
+ status =
+ acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
+ &node);
if (ACPI_FAILURE(status)) {
acpi_os_printf("[Could not get node by pathname]");
}
@@ -185,7 +188,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node, char *message)
}
acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
- ACPI_MEM_FREE(buffer.pointer);
+ ACPI_FREE(buffer.pointer);
}
}
@@ -239,7 +242,7 @@ static u8 acpi_ns_valid_path_separator(char sep)
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
{
- ACPI_FUNCTION_TRACE("ns_get_type");
+ ACPI_FUNCTION_TRACE(ns_get_type);
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node parameter"));
@@ -264,9 +267,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
u32 acpi_ns_local(acpi_object_type type)
{
- ACPI_FUNCTION_TRACE("ns_local");
+ ACPI_FUNCTION_TRACE(ns_local);
if (!acpi_ut_valid_object_type(type)) {
+
/* Type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
@@ -363,7 +367,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
char *result = NULL;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ns_build_internal_name");
+ ACPI_FUNCTION_TRACE(ns_build_internal_name);
/* Setup the correct prefixes, counts, and pointers */
@@ -411,6 +415,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
for (i = 0; i < ACPI_NAME_SIZE; i++) {
if (acpi_ns_valid_path_separator(*external_name) ||
(*external_name == 0)) {
+
/* Pad the segment with underscore(s) if segment is short */
result[i] = '_';
@@ -473,7 +478,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
struct acpi_namestring_info info;
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_internalize_name");
+ ACPI_FUNCTION_TRACE(ns_internalize_name);
if ((!external_name) || (*external_name == 0) || (!converted_name)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -486,7 +491,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
/* We need a segment to store the internal name */
- internal_name = ACPI_MEM_CALLOCATE(info.length);
+ internal_name = ACPI_ALLOCATE_ZEROED(info.length);
if (!internal_name) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -496,7 +501,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
info.internal_name = internal_name;
status = acpi_ns_build_internal_name(&info);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(internal_name);
+ ACPI_FREE(internal_name);
return_ACPI_STATUS(status);
}
@@ -533,7 +538,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
acpi_native_uint i = 0;
acpi_native_uint j = 0;
- ACPI_FUNCTION_TRACE("ns_externalize_name");
+ ACPI_FUNCTION_TRACE(ns_externalize_name);
if (!internal_name_length || !internal_name || !converted_name) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -628,7 +633,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
/*
* Build converted_name
*/
- *converted_name = ACPI_MEM_CALLOCATE(required_length);
+ *converted_name = ACPI_ALLOCATE_ZEROED(required_length);
if (!(*converted_name)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -681,13 +686,9 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
ACPI_FUNCTION_ENTRY();
/*
- * Simple implementation.
+ * Simple implementation
*/
- if (!handle) {
- return (NULL);
- }
-
- if (handle == ACPI_ROOT_OBJECT) {
+ if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
return (acpi_gbl_root_node);
}
@@ -697,7 +698,7 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
return (NULL);
}
- return ((struct acpi_namespace_node *)handle);
+ return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}
/*******************************************************************************
@@ -752,7 +753,7 @@ void acpi_ns_terminate(void)
{
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ns_terminate");
+ ACPI_FUNCTION_TRACE(ns_terminate);
/*
* 1) Free the entire namespace -- all nodes and objects
@@ -792,9 +793,10 @@ void acpi_ns_terminate(void)
u32 acpi_ns_opens_scope(acpi_object_type type)
{
- ACPI_FUNCTION_TRACE_STR("ns_opens_scope", acpi_ut_get_type_name(type));
+ ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
if (!acpi_ut_valid_object_type(type)) {
+
/* type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
@@ -806,12 +808,12 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
/*******************************************************************************
*
- * FUNCTION: acpi_ns_get_node_by_path
+ * FUNCTION: acpi_ns_get_node
*
* PARAMETERS: *Pathname - Name to be found, in external (ASL) format. The
* \ (backslash) and ^ (carat) prefixes, and the
* . (period) to separate segments are supported.
- * start_node - Root of subtree to be searched, or NS_ALL for the
+ * prefix_node - Root of subtree to be searched, or NS_ALL for the
* root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
@@ -827,23 +829,29 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
******************************************************************************/
acpi_status
-acpi_ns_get_node_by_path(char *pathname,
- struct acpi_namespace_node *start_node,
- u32 flags, struct acpi_namespace_node **return_node)
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+ char *pathname,
+ u32 flags, struct acpi_namespace_node **return_node)
{
union acpi_generic_state scope_info;
acpi_status status;
- char *internal_path = NULL;
-
- ACPI_FUNCTION_TRACE_PTR("ns_get_node_by_path", pathname);
+ char *internal_path;
- if (pathname) {
- /* Convert path to internal representation */
+ ACPI_FUNCTION_TRACE_PTR(ns_get_node, pathname);
- status = acpi_ns_internalize_name(pathname, &internal_path);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (!pathname) {
+ *return_node = prefix_node;
+ if (!prefix_node) {
+ *return_node = acpi_gbl_root_node;
}
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Convert path to internal representation */
+
+ status = acpi_ns_internalize_name(pathname, &internal_path);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
}
/* Must lock namespace during lookup */
@@ -855,26 +863,23 @@ acpi_ns_get_node_by_path(char *pathname,
/* Setup lookup scope (search starting point) */
- scope_info.scope.node = start_node;
+ scope_info.scope.node = prefix_node;
/* Lookup the name in the namespace */
- status = acpi_ns_lookup(&scope_info, internal_path,
- ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
- (flags | ACPI_NS_DONT_OPEN_SCOPE),
- NULL, return_node);
+ status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ (flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
+ return_node);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s, %s\n",
- internal_path,
- acpi_format_exception(status)));
+ pathname, acpi_format_exception(status)));
}
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
cleanup:
- if (internal_path) {
- ACPI_MEM_FREE(internal_path);
- }
+ ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
@@ -960,9 +965,10 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
{
struct acpi_namespace_node *parent_node;
- ACPI_FUNCTION_TRACE("ns_find_parent_name");
+ ACPI_FUNCTION_TRACE(ns_find_parent_name);
if (child_node) {
+
/* Valid entry. Get the parent Node */
parent_node = acpi_ns_get_parent_node(child_node);
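Illustrative note (not part of the patch): the acpi_ns_get_node() interface above accepts the three (prefix node, pathname) combinations described in the nseval.c hunk - null node with an absolute path, node with a relative path, and node with a null path (which simply returns the node, or the root if both are null). A hedged usage sketch with a hypothetical helper name:

/*
 * Sketch only: look up _STA relative to a device node, with no upward
 * search of enclosing scopes.
 */
static acpi_status example_lookup_sta(struct acpi_namespace_node *device_node,
				      struct acpi_namespace_node **return_node)
{
	return (acpi_ns_get_node(device_node, METHOD_NAME__STA,
				 ACPI_NS_NO_UPSEARCH, return_node));
}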
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index fcab1e784b81..c8f6bef16ed0 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -76,6 +76,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
ACPI_FUNCTION_ENTRY();
if (!child_node) {
+
/* It's really the parent's _scope_ that we want */
if (parent_node->child) {
@@ -92,6 +93,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
/* If any type is OK, we are done */
if (type == ACPI_TYPE_ANY) {
+
/* next_node is NULL if we are at the end-of-list */
return (next_node);
@@ -100,6 +102,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
/* Must search for the node -- but within this scope only */
while (next_node) {
+
/* If type matches, we are done */
if (next_node->type == type) {
@@ -161,7 +164,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
acpi_object_type child_type;
u32 level;
- ACPI_FUNCTION_TRACE("ns_walk_namespace");
+ ACPI_FUNCTION_TRACE(ns_walk_namespace);
/* Special case for the namespace Root Node */
@@ -182,6 +185,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
* bubbled up to (and passed) the original parent handle (start_entry)
*/
while (level > 0) {
+
/* Get the next node in this scope. Null if not found */
status = AE_OK;
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index a95f636dc35d..6d9bd45af30a 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acinterp.h>
@@ -51,6 +49,7 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")
+#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
* FUNCTION: acpi_evaluate_object_typed
@@ -71,18 +70,17 @@ ACPI_MODULE_NAME("nsxfeval")
* be valid (non-null)
*
******************************************************************************/
-#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
acpi_string pathname,
- struct acpi_object_list *external_params,
- struct acpi_buffer *return_buffer,
+ struct acpi_object_list * external_params,
+ struct acpi_buffer * return_buffer,
acpi_object_type return_type)
{
acpi_status status;
u8 must_free = FALSE;
- ACPI_FUNCTION_TRACE("acpi_evaluate_object_typed");
+ ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
/* Return buffer must be valid */
@@ -110,6 +108,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
}
if (return_buffer->length == 0) {
+
/* Error because caller specifically asked for a return value */
ACPI_ERROR((AE_INFO, "No return value"));
@@ -131,6 +130,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
acpi_ut_get_type_name(return_type)));
if (must_free) {
+
/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
acpi_os_free(return_buffer->pointer);
@@ -140,6 +140,8 @@ acpi_evaluate_object_typed(acpi_handle handle,
return_buffer->length = 0;
return_ACPI_STATUS(AE_TYPE);
}
+
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -161,7 +163,6 @@ acpi_evaluate_object_typed(acpi_handle handle,
* be valid (non-null)
*
******************************************************************************/
-
acpi_status
acpi_evaluate_object(acpi_handle handle,
acpi_string pathname,
@@ -170,51 +171,61 @@ acpi_evaluate_object(acpi_handle handle,
{
acpi_status status;
acpi_status status2;
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
acpi_size buffer_space_needed;
u32 i;
- ACPI_FUNCTION_TRACE("acpi_evaluate_object");
+ ACPI_FUNCTION_TRACE(acpi_evaluate_object);
- info.node = handle;
- info.parameters = NULL;
- info.return_object = NULL;
- info.parameter_type = ACPI_PARAM_ARGS;
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->pathname = pathname;
+ info->parameter_type = ACPI_PARAM_ARGS;
+
+ /* Convert and validate the device handle */
+
+ info->prefix_node = acpi_ns_map_handle_to_node(handle);
+ if (!info->prefix_node) {
+ status = AE_BAD_PARAMETER;
+ goto cleanup;
+ }
/*
- * If there are parameters to be passed to the object
- * (which must be a control method), the external objects
- * must be converted to internal objects
+ * If there are parameters to be passed to a control method, the external
+ * objects must all be converted to internal objects
*/
if (external_params && external_params->count) {
/*
* Allocate a new parameter block for the internal objects
* Add 1 to count to allow for null terminated internal list
*/
- info.parameters = ACPI_MEM_CALLOCATE(((acpi_size)
- external_params->count +
- 1) * sizeof(void *));
- if (!info.parameters) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ external_params->
+ count +
+ 1) * sizeof(void *));
+ if (!info->parameters) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
- /*
- * Convert each external object in the list to an
- * internal object
- */
+ /* Convert each external object in the list to an internal object */
+
for (i = 0; i < external_params->count; i++) {
status =
acpi_ut_copy_eobject_to_iobject(&external_params->
pointer[i],
- &info.
+ &info->
parameters[i]);
if (ACPI_FAILURE(status)) {
- acpi_ut_delete_internal_object_list(info.
- parameters);
- return_ACPI_STATUS(status);
+ goto cleanup;
}
}
- info.parameters[external_params->count] = NULL;
+ info->parameters[external_params->count] = NULL;
}
/*
@@ -224,43 +235,31 @@ acpi_evaluate_object(acpi_handle handle,
* 3) Valid handle
*/
if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
- /*
- * The path is fully qualified, just evaluate by name
- */
- status = acpi_ns_evaluate_by_name(pathname, &info);
+
+ /* The path is fully qualified, just evaluate by name */
+
+ info->prefix_node = NULL;
+ status = acpi_ns_evaluate(info);
} else if (!handle) {
/*
- * A handle is optional iff a fully qualified pathname
- * is specified. Since we've already handled fully
- * qualified names above, this is an error
+ * A handle is optional iff a fully qualified pathname is specified.
+ * Since we've already handled fully qualified names above, this is
+ * an error
*/
if (!pathname) {
- ACPI_ERROR((AE_INFO,
- "Both Handle and Pathname are NULL"));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Both Handle and Pathname are NULL"));
} else {
- ACPI_ERROR((AE_INFO,
- "Handle is NULL and Pathname is relative"));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Null Handle with relative pathname [%s]",
+ pathname));
}
status = AE_BAD_PARAMETER;
} else {
- /*
- * We get here if we have a handle -- and if we have a
- * pathname it is relative. The handle will be validated
- * in the lower procedures
- */
- if (!pathname) {
- /*
- * The null pathname case means the handle is for
- * the actual object to be evaluated
- */
- status = acpi_ns_evaluate_by_handle(&info);
- } else {
- /*
- * Both a Handle and a relative Pathname
- */
- status = acpi_ns_evaluate_relative(pathname, &info);
- }
+ /* We have a namespace node and possibly a relative path */
+
+ status = acpi_ns_evaluate(info);
}
/*
@@ -268,10 +267,10 @@ acpi_evaluate_object(acpi_handle handle,
* copy the return value to an external object.
*/
if (return_buffer) {
- if (!info.return_object) {
+ if (!info->return_object) {
return_buffer->length = 0;
} else {
- if (ACPI_GET_DESCRIPTOR_TYPE(info.return_object) ==
+ if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
ACPI_DESC_TYPE_NAMED) {
/*
* If we received a NS Node as a return object, this means that
@@ -282,19 +281,19 @@ acpi_evaluate_object(acpi_handle handle,
* support for various types at a later date if necessary.
*/
status = AE_TYPE;
- info.return_object = NULL; /* No need to delete a NS Node */
+ info->return_object = NULL; /* No need to delete a NS Node */
return_buffer->length = 0;
}
if (ACPI_SUCCESS(status)) {
- /*
- * Find out how large a buffer is needed
- * to contain the returned object
- */
+
+ /* Get the size of the returned object */
+
status =
- acpi_ut_get_object_size(info.return_object,
+ acpi_ut_get_object_size(info->return_object,
&buffer_space_needed);
if (ACPI_SUCCESS(status)) {
+
/* Validate/Allocate/Clear caller buffer */
status =
@@ -303,7 +302,8 @@ acpi_evaluate_object(acpi_handle handle,
buffer_space_needed);
if (ACPI_FAILURE(status)) {
/*
- * Caller's buffer is too small or a new one can't be allocated
+ * Caller's buffer is too small or a new one can't
+ * be allocated
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Needed buffer size %X, %s\n",
@@ -312,12 +312,11 @@ acpi_evaluate_object(acpi_handle handle,
acpi_format_exception
(status)));
} else {
- /*
- * We have enough space for the object, build it
- */
+ /* We have enough space for the object, build it */
+
status =
acpi_ut_copy_iobject_to_eobject
- (info.return_object,
+ (info->return_object,
return_buffer);
}
}
@@ -325,35 +324,37 @@ acpi_evaluate_object(acpi_handle handle,
}
}
- if (info.return_object) {
+ if (info->return_object) {
/*
- * Delete the internal return object. NOTE: Interpreter
- * must be locked to avoid race condition.
+ * Delete the internal return object. NOTE: Interpreter must be
+ * locked to avoid race condition.
*/
status2 = acpi_ex_enter_interpreter();
if (ACPI_SUCCESS(status2)) {
- /*
- * Delete the internal return object. (Or at least
- * decrement the reference count by one)
- */
- acpi_ut_remove_reference(info.return_object);
+
+ /* Remove one reference on the return object (should delete it) */
+
+ acpi_ut_remove_reference(info->return_object);
acpi_ex_exit_interpreter();
}
}
- /*
- * Free the input parameter list (if we created one),
- */
- if (info.parameters) {
+ cleanup:
+
+ /* Free the input parameter list (if we created one) */
+
+ if (info->parameters) {
+
/* Free the allocated parameter block */
- acpi_ut_delete_internal_object_list(info.parameters);
+ acpi_ut_delete_internal_object_list(info->parameters);
}
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_evaluate_object);
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
/*******************************************************************************
*
@@ -384,7 +385,6 @@ EXPORT_SYMBOL(acpi_evaluate_object);
* function, etc.
*
******************************************************************************/
-
acpi_status
acpi_walk_namespace(acpi_object_type type,
acpi_handle start_object,
@@ -394,7 +394,7 @@ acpi_walk_namespace(acpi_object_type type,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_walk_namespace");
+ ACPI_FUNCTION_TRACE(acpi_walk_namespace);
/* Parameter validation */
@@ -421,7 +421,7 @@ acpi_walk_namespace(acpi_object_type type,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_walk_namespace);
+ACPI_EXPORT_SYMBOL(acpi_walk_namespace)
/*******************************************************************************
*
@@ -436,7 +436,6 @@ EXPORT_SYMBOL(acpi_walk_namespace);
* on that.
*
******************************************************************************/
-
static acpi_status
acpi_ns_get_device_callback(acpi_handle obj_handle,
u32 nesting_level,
@@ -473,6 +472,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
}
if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
+
/* Don't examine children of the device if not present */
return (AE_CTRL_DEPTH);
@@ -489,6 +489,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
}
if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) {
+
/* Get the list of Compatible IDs */
status = acpi_ut_execute_CID(node, &cid);
@@ -505,11 +506,11 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
sizeof(struct
acpi_compatible_id)) !=
0) {
- ACPI_MEM_FREE(cid);
+ ACPI_FREE(cid);
return (AE_OK);
}
}
- ACPI_MEM_FREE(cid);
+ ACPI_FREE(cid);
}
}
@@ -551,7 +552,7 @@ acpi_get_devices(char *HID,
acpi_status status;
struct acpi_get_devices_info info;
- ACPI_FUNCTION_TRACE("acpi_get_devices");
+ ACPI_FUNCTION_TRACE(acpi_get_devices);
/* Parameter validation */
@@ -563,9 +564,9 @@ acpi_get_devices(char *HID,
* We're going to call their callback from OUR callback, so we need
* to know what it is, and their context parameter.
*/
+ info.hid = HID;
info.context = context;
info.user_function = user_function;
- info.hid = HID;
/*
* Lock the namespace around the walk.
@@ -578,9 +579,8 @@ acpi_get_devices(char *HID,
return_ACPI_STATUS(status);
}
- status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE,
- ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK,
+ status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ns_get_device_callback, &info,
return_value);
@@ -588,7 +588,7 @@ acpi_get_devices(char *HID,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_devices);
+ACPI_EXPORT_SYMBOL(acpi_get_devices)
/*******************************************************************************
*
@@ -603,7 +603,6 @@ EXPORT_SYMBOL(acpi_get_devices);
* DESCRIPTION: Attach arbitrary data and handler to a namespace node.
*
******************************************************************************/
-
acpi_status
acpi_attach_data(acpi_handle obj_handle,
acpi_object_handler handler, void *data)
@@ -637,6 +636,8 @@ acpi_attach_data(acpi_handle obj_handle,
return (status);
}
+ACPI_EXPORT_SYMBOL(acpi_attach_data)
+
/*******************************************************************************
*
* FUNCTION: acpi_detach_data
@@ -649,7 +650,6 @@ acpi_attach_data(acpi_handle obj_handle,
* DESCRIPTION: Remove data that was previously attached to a node.
*
******************************************************************************/
-
acpi_status
acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
{
@@ -682,6 +682,8 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
return (status);
}
+ACPI_EXPORT_SYMBOL(acpi_detach_data)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_data
@@ -695,7 +697,6 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
* DESCRIPTION: Retrieve data that was previously attached to a namespace node.
*
******************************************************************************/
-
acpi_status
acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
{
@@ -727,3 +728,5 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_get_data)
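Editor's sketch (not part of the patch): a minimal caller of the reworked acpi_evaluate_object() path above, assuming the usual <acpi/acpi_bus.h> kernel context; the handle is assumed valid and the method name "_EXM" plus its single integer argument are hypothetical.

static acpi_status example_eval(acpi_handle handle)
{
	struct acpi_object_list args;
	union acpi_object arg;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	/* One integer argument; converted to an internal object inside the call */
	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = 1;
	args.count = 1;
	args.pointer = &arg;

	status = acpi_evaluate_object(handle, "_EXM", &args, &output);
	if (ACPI_SUCCESS(status)) {
		/* Caller owns and must free the returned buffer */
		acpi_os_free(output.pointer);
	}
	return status;
}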
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index 8cd8675a47c0..978213a6c19f 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@@ -114,9 +112,8 @@ acpi_get_handle(acpi_handle parent,
/*
* Find the Node and convert to a handle
*/
- status =
- acpi_ns_get_node_by_path(pathname, prefix_node, ACPI_NS_NO_UPSEARCH,
- &node);
+ status = acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH,
+ &node);
*ret_handle = NULL;
if (ACPI_SUCCESS(status)) {
@@ -126,7 +123,7 @@ acpi_get_handle(acpi_handle parent,
return (status);
}
-EXPORT_SYMBOL(acpi_get_handle);
+ACPI_EXPORT_SYMBOL(acpi_get_handle)
/******************************************************************************
*
@@ -143,7 +140,6 @@ EXPORT_SYMBOL(acpi_get_handle);
* complementary functions.
*
******************************************************************************/
-
acpi_status
acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
{
@@ -162,6 +158,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
}
if (name_type == ACPI_FULL_PATHNAME) {
+
/* Get the full pathname (From the namespace root) */
status = acpi_ns_handle_to_pathname(handle, buffer);
@@ -203,7 +200,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
return (status);
}
-EXPORT_SYMBOL(acpi_get_name);
+ACPI_EXPORT_SYMBOL(acpi_get_name)
/******************************************************************************
*
@@ -219,7 +216,6 @@ EXPORT_SYMBOL(acpi_get_name);
* control methods (Such as in the case of a device.)
*
******************************************************************************/
-
acpi_status
acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
{
@@ -241,7 +237,7 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
return (status);
}
- info = ACPI_MEM_CALLOCATE(sizeof(struct acpi_device_info));
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info));
if (!info) {
return (AE_NO_MEMORY);
}
@@ -345,11 +341,11 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
}
cleanup:
- ACPI_MEM_FREE(info);
+ ACPI_FREE(info);
if (cid_list) {
- ACPI_MEM_FREE(cid_list);
+ ACPI_FREE(cid_list);
}
return (status);
}
-EXPORT_SYMBOL(acpi_get_object_info);
+ACPI_EXPORT_SYMBOL(acpi_get_object_info)
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
index a0332595677a..a163e1d3708d 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@@ -101,7 +99,7 @@ acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
return (status);
}
-EXPORT_SYMBOL(acpi_get_type);
+ACPI_EXPORT_SYMBOL(acpi_get_type)
/*******************************************************************************
*
@@ -116,7 +114,6 @@ EXPORT_SYMBOL(acpi_get_type);
* Handle.
*
******************************************************************************/
-
acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
{
struct acpi_namespace_node *node;
@@ -162,7 +159,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
return (status);
}
-EXPORT_SYMBOL(acpi_get_parent);
+ACPI_EXPORT_SYMBOL(acpi_get_parent)
/*******************************************************************************
*
@@ -181,7 +178,6 @@ EXPORT_SYMBOL(acpi_get_parent);
* Scope is returned.
*
******************************************************************************/
-
acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
@@ -206,6 +202,7 @@ acpi_get_next_object(acpi_object_type type,
/* If null handle, use the parent */
if (!child) {
+
/* Start search at the beginning of the specified scope */
parent_node = acpi_ns_map_handle_to_node(parent);
@@ -242,4 +239,4 @@ acpi_get_next_object(acpi_object_type type,
return (status);
}
-EXPORT_SYMBOL(acpi_get_next_object);
+ACPI_EXPORT_SYMBOL(acpi_get_next_object)
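Editor's sketch (not part of the patch): the exported accessors kept above are typically used to iterate a scope; here is an illustrative child walk over an assumed-valid parent handle.

static void example_walk_children(acpi_handle parent)
{
	acpi_handle child = NULL;
	acpi_object_type type;

	/* A NULL child handle starts the search at the beginning of the scope */
	while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, parent,
						 child, &child))) {
		if (ACPI_SUCCESS(acpi_get_type(child, &type))) {
			/* 'type' now describes this child object */
		}
	}
}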
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 64b98e82feb7..e2c1a16078c9 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -36,12 +36,60 @@
#define _COMPONENT ACPI_NUMA
ACPI_MODULE_NAME("numa")
+static nodemask_t nodes_found_map = NODE_MASK_NONE;
+#define PXM_INVAL -1
+#define NID_INVAL -1
+
+/* maps to convert between proximity domain and logical node ID */
+int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
+int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+
extern int __init acpi_table_parse_madt_family(enum acpi_table_id id,
unsigned long madt_size,
int entry_id,
acpi_madt_entry_handler handler,
unsigned int max_entries);
+int __cpuinit pxm_to_node(int pxm)
+{
+ if (pxm < 0)
+ return NID_INVAL;
+ return pxm_to_node_map[pxm];
+}
+
+int __cpuinit node_to_pxm(int node)
+{
+ if (node < 0)
+ return PXM_INVAL;
+ return node_to_pxm_map[node];
+}
+
+int __cpuinit acpi_map_pxm_to_node(int pxm)
+{
+ int node = pxm_to_node_map[pxm];
+
+ if (node < 0) {
+ if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
+ return NID_INVAL;
+ node = first_unset_node(nodes_found_map);
+ pxm_to_node_map[pxm] = node;
+ node_to_pxm_map[node] = pxm;
+ node_set(node, nodes_found_map);
+ }
+
+ return node;
+}
+
+void __cpuinit acpi_unmap_pxm_to_node(int node)
+{
+ int pxm = node_to_pxm_map[node];
+ pxm_to_node_map[pxm] = NID_INVAL;
+ node_to_pxm_map[node] = PXM_INVAL;
+ node_clear(node, nodes_found_map);
+}
+
void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
{
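Editor's sketch (not part of the patch): how the new proximity-domain helpers added above would be driven from, say, an SRAT affinity handler; the pxm value is hypothetical and assumed to come from firmware.

static void example_map_proximity_domain(int pxm)
{
	int node;

	if (pxm < 0)
		return;

	node = acpi_map_pxm_to_node(pxm);
	if (node < 0)
		return;		/* no free logical node IDs left */

	/* The logical node is now reserved; reverse lookup stays consistent */
	WARN_ON(node_to_pxm(node) != pxm);
}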
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 13b5fd5854a8..1bb558adee66 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
+#include <linux/kthread.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
@@ -600,23 +601,41 @@ static void acpi_os_execute_deferred(void *context)
return_VOID;
}
-acpi_status
-acpi_os_queue_for_execution(u32 priority,
+static int acpi_os_execute_thread(void *context)
+{
+ struct acpi_os_dpc *dpc = (struct acpi_os_dpc *)context;
+ if (dpc) {
+ dpc->function(dpc->context);
+ kfree(dpc);
+ }
+ do_exit(0);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_execute
+ *
+ * PARAMETERS: Type - Type of the callback
+ * Function - Function to be executed
+ * Context - Function parameters
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Depending on type, either queues the function for deferred
+ *              execution or immediately executes it on a separate thread.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_execute(acpi_execute_type type,
acpi_osd_exec_callback function, void *context)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct work_struct *task;
-
- ACPI_FUNCTION_TRACE("os_queue_for_execution");
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Scheduling function [%p(%p)] for deferred execution.\n",
- function, context));
+ struct task_struct *p;
if (!function)
- return_ACPI_STATUS(AE_BAD_PARAMETER);
-
+ return AE_BAD_PARAMETER;
/*
* Allocate/initialize DPC structure. Note that this memory will be
* freed by the callee. The kernel handles the tq_struct list in a
@@ -627,30 +646,37 @@ acpi_os_queue_for_execution(u32 priority,
* We can save time and code by allocating the DPC and tq_structs
* from the same memory.
*/
-
- dpc =
- kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
- GFP_ATOMIC);
+ if (type == OSL_NOTIFY_HANDLER) {
+ dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_KERNEL);
+ } else {
+ dpc = kmalloc(sizeof(struct acpi_os_dpc) +
+ sizeof(struct work_struct), GFP_ATOMIC);
+ }
if (!dpc)
- return_ACPI_STATUS(AE_NO_MEMORY);
-
+ return AE_NO_MEMORY;
dpc->function = function;
dpc->context = context;
- task = (void *)(dpc + 1);
- INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
- if (!queue_work(kacpid_wq, task)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Call to queue_work() failed.\n"));
- kfree(dpc);
- status = AE_ERROR;
+ if (type == OSL_NOTIFY_HANDLER) {
+ p = kthread_create(acpi_os_execute_thread, dpc, "kacpid_notify");
+ if (!IS_ERR(p)) {
+ wake_up_process(p);
+ } else {
+ status = AE_NO_MEMORY;
+ kfree(dpc);
+ }
+ } else {
+ task = (void *)(dpc + 1);
+ INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
+ if (!queue_work(kacpid_wq, task)) {
+ status = AE_ERROR;
+ kfree(dpc);
+ }
}
-
- return_ACPI_STATUS(status);
+ return status;
}
-EXPORT_SYMBOL(acpi_os_queue_for_execution);
+EXPORT_SYMBOL(acpi_os_execute);
void acpi_os_wait_events_complete(void *context)
{
@@ -769,9 +795,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
- if (in_atomic())
- timeout = 0;
-
switch (timeout) {
/*
* No Wait:
@@ -896,14 +919,6 @@ u8 acpi_os_writable(void *ptr, acpi_size len)
}
#endif
-u32 acpi_os_get_thread_id(void)
-{
- if (!in_atomic())
- return current->pid;
-
- return 0;
-}
-
acpi_status acpi_os_signal(u32 function, void *info)
{
switch (function) {
@@ -1050,12 +1065,12 @@ void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags)
*
* FUNCTION: acpi_os_create_cache
*
- * PARAMETERS: CacheName - Ascii name for the cache
- * ObjectSize - Size of each cached object
- * MaxDepth - Maximum depth of the cache (in objects)
- * ReturnCache - Where the new cache object is returned
+ * PARAMETERS: name - Ascii name for the cache
+ * size - Size of each cached object
+ * depth - Maximum depth of the cache (in objects) <ignored>
+ * cache - Where the new cache object is returned
*
- * RETURN: Status
+ * RETURN: status
*
* DESCRIPTION: Create a cache object
*
@@ -1065,7 +1080,10 @@ acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
*cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
- return AE_OK;
+ if (*cache == NULL)
+ return AE_ERROR;
+ else
+ return AE_OK;
}
/*******************************************************************************
@@ -1134,16 +1152,63 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
*
* RETURN: Status
*
- * DESCRIPTION: Get an object from the specified cache. If cache is empty,
- * the object is allocated.
+ * DESCRIPTION: Return a zero-filled object.
*
******************************************************************************/
void *acpi_os_acquire_object(acpi_cache_t * cache)
{
- void *object = kmem_cache_alloc(cache, GFP_KERNEL);
+ void *object = kmem_cache_zalloc(cache, GFP_KERNEL);
WARN_ON(!object);
return object;
}
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_validate_interface
+ *
+ * PARAMETERS: interface - Requested interface to be validated
+ *
+ * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
+ *
+ * DESCRIPTION: Match an interface string to the interfaces supported by the
+ * host. Strings originate from an AML call to the _OSI method.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_interface (char *interface)
+{
+
+ return AE_SUPPORT;
+}
+
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_validate_address
+ *
+ * PARAMETERS: space_id - ACPI space ID
+ * address - Physical address
+ * length - Address length
+ *
+ * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
+ * should return AE_AML_ILLEGAL_ADDRESS.
+ *
+ * DESCRIPTION: Validate a system address via the host OS. Used to validate
+ * the addresses accessed by AML operation regions.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_address (
+ u8 space_id,
+ acpi_physical_address address,
+ acpi_size length)
+{
+
+ return AE_OK;
+}
+
+
#endif
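Editor's sketch (not part of the patch): a usage example for the new acpi_os_execute() entry point above; OSL_NOTIFY_HANDLER requests a dedicated kernel thread, any other type goes through the kacpid workqueue. The callback and context below are hypothetical.

static void example_notify_work(void *context)
{
	/* Runs in the "kacpid_notify" kthread created by acpi_os_execute() */
}

static acpi_status example_queue_notify(void *context)
{
	return acpi_os_execute(OSL_NOTIFY_HANDLER, example_notify_work,
			       context);
}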
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index de573be52718..bf88e076c3e9 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -79,7 +79,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
acpi_native_uint byte_count;
u8 byte_zero_mask = 0x3F; /* Default [0:5] */
- ACPI_FUNCTION_TRACE("ps_get_next_package_length");
+ ACPI_FUNCTION_TRACE(ps_get_next_package_length);
/*
* Byte 0 bits [6:7] contain the number of additional bytes
@@ -128,7 +128,7 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
u8 *start = parser_state->aml;
u32 package_length;
- ACPI_FUNCTION_TRACE("ps_get_next_package_end");
+ ACPI_FUNCTION_TRACE(ps_get_next_package_end);
/* Function below updates parser_state->Aml */
@@ -157,7 +157,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
u8 *start = parser_state->aml;
u8 *end = parser_state->aml;
- ACPI_FUNCTION_TRACE("ps_get_next_namestring");
+ ACPI_FUNCTION_TRACE(ps_get_next_namestring);
/* Point past any namestring prefix characters (backslash or carat) */
@@ -237,7 +237,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_generic_state scope_info;
- ACPI_FUNCTION_TRACE("ps_get_next_namepath");
+ ACPI_FUNCTION_TRACE(ps_get_next_namepath);
path = acpi_ps_get_next_namestring(parser_state);
acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
@@ -275,6 +275,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
*/
if (ACPI_SUCCESS(status) &&
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
+
/* This name is actually a control method invocation */
method_desc = acpi_ns_get_attached_object(node);
@@ -319,6 +320,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
* some not_found cases are allowed
*/
if (status == AE_NOT_FOUND) {
+
/* 1) not_found is ok during load pass 1/2 (allow forward references) */
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) !=
@@ -354,6 +356,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
+
/* Report a control method execution error */
status = acpi_ds_method_error(status, walk_state);
@@ -388,7 +391,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
u16 opcode;
u8 *aml = parser_state->aml;
- ACPI_FUNCTION_TRACE_U32("ps_get_next_simple_arg", arg_type);
+ ACPI_FUNCTION_TRACE_U32(ps_get_next_simple_arg, arg_type);
switch (arg_type) {
case ARGP_BYTEDATA:
@@ -453,7 +456,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid arg_type %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
return_VOID;
}
@@ -484,7 +487,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
u16 opcode;
u32 name;
- ACPI_FUNCTION_TRACE("ps_get_next_field");
+ ACPI_FUNCTION_TRACE(ps_get_next_field);
/* Determine field type */
@@ -590,7 +593,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
u32 subop;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ps_get_next_arg", parser_state);
+ ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);
switch (arg_type) {
case ARGP_BYTEDATA:
@@ -620,6 +623,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_FIELDLIST:
if (parser_state->aml < parser_state->pkg_end) {
+
/* Non-empty list */
while (parser_state->aml < parser_state->pkg_end) {
@@ -645,6 +649,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_BYTELIST:
if (parser_state->aml < parser_state->pkg_end) {
+
/* Non-empty list */
arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
@@ -673,6 +678,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
if (subop == 0 ||
acpi_ps_is_leading_char(subop) ||
acpi_ps_is_prefix_char(subop)) {
+
/* null_name or name_string */
arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
@@ -703,6 +709,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_OBJLIST:
if (parser_state->aml < parser_state->pkg_end) {
+
/* Non-empty list of variable arguments, nothing returned */
walk_state->arg_count = ACPI_VAR_ARGS;
@@ -711,7 +718,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid arg_type: %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
status = AE_AML_OPERAND_TYPE;
break;
}
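Editor's sketch (not part of the patch): the package-length parsing touched above follows the AML PkgLength encoding, shown here as a standalone decoder with hypothetical names; bits [7:6] of the lead byte give the number of extra bytes, and when extra bytes are present only bits [3:0] of the lead byte contribute to the length.

static u32 example_decode_pkg_length(const u8 *aml, u8 *bytes_used)
{
	u8 byte_count = aml[0] >> 6;	/* 0-3 additional length bytes */
	u32 length;
	u8 i;

	if (!byte_count) {
		*bytes_used = 1;
		return aml[0] & 0x3F;	/* bits [5:0] hold the whole length */
	}

	length = aml[0] & 0x0F;		/* least significant nibble */
	for (i = 1; i <= byte_count; i++)
		length |= (u32)aml[i] << (i * 8 - 4);

	*bytes_used = byte_count + 1;
	return length;
}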
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index 00b072e15d19..e1541db3753a 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -83,7 +83,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
struct acpi_parse_state *parser_state;
u8 *aml_op_start = NULL;
- ACPI_FUNCTION_TRACE_PTR("ps_parse_loop", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
if (walk_state->descending_callback == NULL) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -95,6 +95,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
+
/* We are restarting a preempted control method */
if (acpi_ps_has_completed_scope(parser_state)) {
@@ -128,7 +129,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
}
ACPI_EXCEPTION((AE_INFO, status,
- "get_predicate Failed"));
+ "GetPredicate Failed"));
return_ACPI_STATUS(status);
}
@@ -143,6 +144,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Popped scope, Op=%p\n", op));
} else if (walk_state->prev_op) {
+
/* We were in the middle of an op */
op = walk_state->prev_op;
@@ -156,6 +158,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
while ((parser_state->aml < parser_state->aml_end) || (op)) {
aml_op_start = parser_state->aml;
if (!op) {
+
/* Get the next opcode from the AML stream */
walk_state->aml_offset =
@@ -213,6 +216,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Create Op structure and append to parent's argument list */
if (walk_state->op_info->flags & AML_NAMED) {
+
/* Allocate a new pre_op if necessary */
if (!pre_op) {
@@ -371,7 +375,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (walk_state->op_info) {
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Opcode %4.4X [%s] Op %p Aml %p aml_offset %5.5X\n",
+ "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
(u32) op->common.aml_opcode,
walk_state->op_info->name, op,
parser_state->aml,
@@ -388,6 +392,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Are there any arguments that must be processed? */
if (walk_state->arg_types) {
+
/* Get arguments */
switch (op->common.aml_opcode) {
@@ -742,7 +747,19 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
+
+ status2 =
+ acpi_ds_result_stack_pop
+ (walk_state);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ acpi_ut_delete_generic_state
+ (acpi_ut_pop_generic_state
+ (&walk_state->control_state));
}
+
acpi_ps_pop_scope(parser_state, &op,
&walk_state->arg_types,
&walk_state->arg_count);
@@ -762,6 +779,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
return_ACPI_STATUS(status2);
}
}
+
acpi_ps_pop_scope(parser_state, &op,
&walk_state->arg_types,
&walk_state->arg_count);
@@ -853,6 +871,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
}
else if (ACPI_FAILURE(status)) {
+
/* First error is most important */
(void)
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 11d6351ab8b2..4bd25e32769f 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -725,12 +725,13 @@ static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
- ACPI_FUNCTION_NAME("ps_get_opcode_info");
+ ACPI_FUNCTION_NAME(ps_get_opcode_info);
/*
* Detect normal 8-bit opcode or extended 16-bit opcode
*/
if (!(opcode & 0xFF00)) {
+
/* Simple (8-bit) opcode: 0-255, can't index beyond table */
return (&acpi_gbl_aml_op_info
@@ -739,6 +740,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
(((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
+
/* Valid extended (16-bit) opcode */
return (&acpi_gbl_aml_op_info
@@ -779,7 +781,7 @@ char *acpi_ps_get_opcode_name(u16 opcode)
return (op->name);
#else
- return ("AE_NOT_CONFIGURED");
+ return ("OpcodeName unavailable");
#endif
}
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index a9f3229f4106..7ee2f2e77525 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -106,6 +106,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
opcode = (u16) ACPI_GET8(aml);
if (opcode == AML_EXTENDED_OP_PREFIX) {
+
/* Extended opcode, get the second opcode byte */
aml++;
@@ -137,7 +138,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
const struct acpi_opcode_info *parent_info;
union acpi_parse_object *replacement_op = NULL;
- ACPI_FUNCTION_TRACE_PTR("ps_complete_this_op", op);
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
/* Check for null Op, can happen if AML code is corrupt */
@@ -158,6 +159,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
if (op->common.parent) {
prev = op->common.parent->common.value.arg;
if (!prev) {
+
/* Nothing more to do */
goto cleanup;
@@ -245,6 +247,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
/* We must unlink this op from the parent tree */
if (prev == op) {
+
/* This op is the first in the list */
if (replacement_op) {
@@ -265,6 +268,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
else
while (prev) {
+
/* Traverse all siblings in the parent's argument list */
next = prev->common.next;
@@ -329,7 +333,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
struct acpi_parse_state *parser_state = &walk_state->parser_state;
acpi_status status = AE_CTRL_PENDING;
- ACPI_FUNCTION_TRACE_PTR("ps_next_parse_state", op);
+ ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);
switch (callback_status) {
case AE_CTRL_TERMINATE:
@@ -449,10 +453,10 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
struct acpi_walk_state *previous_walk_state;
- ACPI_FUNCTION_TRACE("ps_parse_aml");
+ ACPI_FUNCTION_TRACE(ps_parse_aml);
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Entered with walk_state=%p Aml=%p size=%X\n",
+ "Entered with WalkState=%p Aml=%p size=%X\n",
walk_state, walk_state->parser_state.aml,
walk_state->parser_state.aml_size));
@@ -460,6 +464,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
thread = acpi_ut_create_thread_state();
if (!thread) {
+ acpi_ds_delete_walk_state(walk_state);
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -510,6 +515,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
} else if (status == AE_CTRL_TERMINATE) {
status = AE_OK;
} else if ((status != AE_OK) && (walk_state->method_desc)) {
+
/* Either the method parse or actual execution failed */
ACPI_ERROR_METHOD("Method parse/execution failed",
@@ -550,20 +556,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
*/
if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) {
- if (walk_state->method_desc) {
- /* Decrement the thread count on the method parse tree */
-
- if (walk_state->method_desc->method.
- thread_count) {
- walk_state->method_desc->method.
- thread_count--;
- } else {
- ACPI_ERROR((AE_INFO,
- "Invalid zero thread count in method"));
- }
- }
-
- acpi_ds_terminate_control_method(walk_state);
+ acpi_ds_terminate_control_method(walk_state->
+ method_desc,
+ walk_state);
}
/* Delete this walk state and all linked control states */
@@ -572,7 +567,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
previous_walk_state = walk_state;
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "return_value=%p, implicit_value=%p State=%p\n",
+ "ReturnValue=%p, ImplicitValue=%p State=%p\n",
walk_state->return_desc,
walk_state->implicit_return_obj, walk_state));
@@ -633,12 +628,14 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
}
} else {
if (previous_walk_state->return_desc) {
+
/* Caller doesn't want it, must delete it */
acpi_ut_remove_reference(previous_walk_state->
return_desc);
}
if (previous_walk_state->implicit_return_obj) {
+
/* Caller doesn't want it, must delete it */
acpi_ut_remove_reference(previous_walk_state->
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
index bc6047caccd9..a3e0314de24d 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/parser/psscope.c
@@ -106,14 +106,14 @@ acpi_ps_init_scope(struct acpi_parse_state * parser_state,
{
union acpi_generic_state *scope;
- ACPI_FUNCTION_TRACE_PTR("ps_init_scope", root_op);
+ ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
- scope->common.data_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
+ scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
scope->parse_scope.op = root_op;
scope->parse_scope.arg_count = ACPI_VAR_ARGS;
scope->parse_scope.arg_end = parser_state->aml_end;
@@ -147,14 +147,14 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
{
union acpi_generic_state *scope;
- ACPI_FUNCTION_TRACE_PTR("ps_push_scope", op);
+ ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
- scope->common.data_type = ACPI_DESC_TYPE_STATE_PSCOPE;
+ scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
scope->parse_scope.op = op;
scope->parse_scope.arg_list = remaining_args;
scope->parse_scope.arg_count = arg_count;
@@ -165,6 +165,7 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
acpi_ut_push_generic_state(&parser_state->scope, scope);
if (arg_count == ACPI_VAR_ARGS) {
+
/* Multiple arguments */
scope->parse_scope.arg_end = parser_state->pkg_end;
@@ -199,14 +200,14 @@ acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
{
union acpi_generic_state *scope = parser_state->scope;
- ACPI_FUNCTION_TRACE("ps_pop_scope");
+ ACPI_FUNCTION_TRACE(ps_pop_scope);
/* Only pop the scope if there is in fact a next scope */
if (scope->common.next) {
scope = acpi_ut_pop_generic_state(&parser_state->scope);
- /* return to parsing previous op */
+ /* Return to parsing previous op */
*op = scope->parse_scope.op;
*arg_list = scope->parse_scope.arg_list;
@@ -217,7 +218,7 @@ acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
acpi_ut_delete_generic_state(scope);
} else {
- /* empty parse stack, prepare to fetch next opcode */
+ /* Empty parse stack, prepare to fetch next opcode */
*op = NULL;
*arg_list = 0;
@@ -246,7 +247,7 @@ void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
{
union acpi_generic_state *scope;
- ACPI_FUNCTION_TRACE_PTR("ps_cleanup_scope", parser_state);
+ ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);
if (!parser_state) {
return_VOID;
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
index dd6f16726fc4..0015717ef096 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/parser/pstree.c
@@ -77,6 +77,7 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
+
/* Invalid opcode or ASCII character */
return (NULL);
@@ -85,6 +86,7 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
/* Check if this opcode requires argument sub-objects */
if (!(op_info->flags & AML_HAS_ARGS)) {
+
/* Has no linked argument objects */
return (NULL);
@@ -130,6 +132,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
+
/* Invalid opcode */
ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X",
@@ -140,6 +143,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
/* Check if this opcode requires argument sub-objects */
if (!(op_info->flags & AML_HAS_ARGS)) {
+
/* Has no linked argument objects */
return;
@@ -148,6 +152,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
/* Append the argument to the linked argument list */
if (op->common.value.arg) {
+
/* Append to existing argument list */
prev_arg = op->common.value.arg;
@@ -222,12 +227,14 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
}
if (arg == origin) {
+
/* Reached parent of origin, end search */
return (NULL);
}
if (parent->common.next) {
+
/* Found sibling of parent */
return (parent->common.next);
@@ -299,5 +306,4 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op)
return (child);
}
#endif
-
#endif /* ACPI_FUTURE_USAGE */
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
index 3e07cb9cb748..182474ae8ce9 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/parser/psutils.c
@@ -89,7 +89,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
{
ACPI_FUNCTION_ENTRY();
- op->common.data_type = ACPI_DESC_TYPE_PARSER;
+ op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
@@ -135,6 +135,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
/* Allocate the minimum required size object */
if (flags == ACPI_PARSEOP_GENERIC) {
+
/* The generic op (default) is by far the most common (16 to 1) */
op = acpi_os_acquire_object(acpi_gbl_ps_node_cache);
@@ -171,7 +172,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
void acpi_ps_free_op(union acpi_parse_object *op)
{
- ACPI_FUNCTION_NAME("ps_free_op");
+ ACPI_FUNCTION_NAME(ps_free_op);
if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n",
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
index 06f05bfd7612..a84a547a0f1b 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/parser/pswalk.c
@@ -64,18 +64,21 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
union acpi_parse_object *next = NULL;
union acpi_parse_object *parent = NULL;
- ACPI_FUNCTION_TRACE_PTR("ps_delete_parse_tree", subtree_root);
+ ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);
/* Visit all nodes in the subtree */
while (op) {
+
/* Check if we are not ascending */
if (op != parent) {
+
/* Look for an argument or child of the current op */
next = acpi_ps_get_arg(op, 0);
if (next) {
+
/* Still going downward in tree (Op is not completed yet) */
op = next;
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index 2dd48cbb7c02..5d996c1140af 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -50,14 +50,14 @@
ACPI_MODULE_NAME("psxface")
/* Local Prototypes */
-static void acpi_ps_start_trace(struct acpi_parameter_info *info);
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
-static void acpi_ps_stop_trace(struct acpi_parameter_info *info);
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
-static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info);
+static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info);
static void
-acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action);
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
/*******************************************************************************
*
@@ -113,7 +113,7 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
*
******************************************************************************/
-static void acpi_ps_start_trace(struct acpi_parameter_info *info)
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
@@ -125,7 +125,7 @@ static void acpi_ps_start_trace(struct acpi_parameter_info *info)
}
if ((!acpi_gbl_trace_method_name) ||
- (acpi_gbl_trace_method_name != info->node->name.integer)) {
+ (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
@@ -158,7 +158,7 @@ static void acpi_ps_start_trace(struct acpi_parameter_info *info)
*
******************************************************************************/
-static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
@@ -170,7 +170,7 @@ static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
}
if ((!acpi_gbl_trace_method_name) ||
- (acpi_gbl_trace_method_name != info->node->name.integer)) {
+ (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
@@ -212,22 +212,23 @@ static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
*
******************************************************************************/
-acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ps_execute_method");
+ ACPI_FUNCTION_TRACE(ps_execute_method);
/* Validate the Info and method Node */
- if (!info || !info->node) {
+ if (!info || !info->resolved_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
}
/* Init for new method, wait on concurrency semaphore */
status =
- acpi_ds_begin_method_execution(info->node, info->obj_desc, NULL);
+ acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
+ NULL);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -248,7 +249,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Parse **** Entry=%p obj=%p\n",
- info->node, info->obj_desc));
+ info->resolved_node, info->obj_desc));
info->pass_number = 1;
status = acpi_ps_execute_pass(info);
@@ -261,7 +262,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Execution **** Entry=%p obj=%p\n",
- info->node, info->obj_desc));
+ info->resolved_node, info->obj_desc));
info->pass_number = 3;
status = acpi_ps_execute_pass(info);
@@ -286,8 +287,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
* a control exception code
*/
if (info->return_object) {
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Method returned obj_desc=%p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
info->return_object));
ACPI_DUMP_STACK_ENTRY(info->return_object);
@@ -301,7 +301,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*
* FUNCTION: acpi_ps_update_parameter_list
*
- * PARAMETERS: Info - See struct acpi_parameter_info
+ * PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: parameter_type and Parameters)
* Action - Add or Remove reference
*
@@ -312,14 +312,16 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
******************************************************************************/
static void
-acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
{
acpi_native_uint i;
if ((info->parameter_type == ACPI_PARAM_ARGS) && (info->parameters)) {
+
/* Update reference count for each parameter */
for (i = 0; info->parameters[i]; i++) {
+
/* Ignore errors, just do them all */
(void)acpi_ut_update_object_reference(info->
@@ -333,7 +335,7 @@ acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
*
* FUNCTION: acpi_ps_execute_pass
*
- * PARAMETERS: Info - See struct acpi_parameter_info
+ * PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: pass_number, Node, and obj_desc)
*
* RETURN: Status
@@ -342,13 +344,13 @@ acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
*
******************************************************************************/
-static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info)
+static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info)
{
acpi_status status;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ps_execute_pass");
+ ACPI_FUNCTION_TRACE(ps_execute_pass);
/* Create and init a Root Node */
@@ -367,7 +369,7 @@ static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info)
goto cleanup;
}
- status = acpi_ds_init_aml_walk(walk_state, op, info->node,
+ status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
info->obj_desc->method.aml_start,
info->obj_desc->method.aml_length,
info->pass_number == 1 ? NULL : info,
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 8920e8c6e246..228bdb626502 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -38,6 +38,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
+#include <linux/mutex.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -91,7 +92,7 @@ static struct {
int count;
struct list_head entries;
} acpi_link;
-DECLARE_MUTEX(acpi_link_lock);
+DEFINE_MUTEX(acpi_link_lock);
/* --------------------------------------------------------------------------
PCI Link Device Management
@@ -641,19 +642,19 @@ acpi_pci_link_allocate_irq(acpi_handle handle,
return_VALUE(-1);
}
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
if (acpi_pci_link_allocate(link)) {
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
return_VALUE(-1);
}
if (!link->irq.active) {
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link active IRQ is 0!\n"));
return_VALUE(-1);
}
link->refcnt++;
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
if (triggering)
*triggering = link->irq.triggering;
@@ -691,9 +692,9 @@ int acpi_pci_link_free_irq(acpi_handle handle)
return_VALUE(-1);
}
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
if (!link->irq.initialized) {
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link isn't initialized\n"));
return_VALUE(-1);
}
@@ -716,7 +717,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
if (link->refcnt == 0) {
acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
}
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
return_VALUE(link->irq.active);
}
@@ -747,7 +748,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
acpi_driver_data(device) = link;
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
result = acpi_pci_link_get_possible(link);
if (result)
goto end;
@@ -782,7 +783,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
end:
/* disable all links -- to be activated on use */
acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
if (result)
kfree(link);
@@ -840,9 +841,9 @@ static int acpi_pci_link_remove(struct acpi_device *device, int type)
link = (struct acpi_pci_link *)acpi_driver_data(device);
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
list_del(&link->node);
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
kfree(link);
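Editor's sketch (not part of the patch): the pci_link.c hunks above are a straight semaphore-to-mutex conversion; the pattern, reduced to hypothetical data, looks like this.

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* was: DECLARE_MUTEX(example_sem) */
static int example_refcnt;

static void example_get(void)
{
	mutex_lock(&example_lock);	/* was: down(&example_sem) */
	example_refcnt++;
	mutex_unlock(&example_lock);	/* was: up(&example_sem) */
}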
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 713b763884a9..decaebb4cbe9 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -110,7 +110,7 @@ static struct file_operations acpi_processor_info_fops = {
};
struct acpi_processor *processors[NR_CPUS];
-struct acpi_processor_errata errata;
+struct acpi_processor_errata errata __read_mostly;
/* --------------------------------------------------------------------------
Errata Handling
@@ -388,7 +388,7 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
/* Use the acpiid in MADT to map cpus in case of SMP */
#ifndef CONFIG_SMP
-#define convert_acpiid_to_cpu(acpi_id) (0xff)
+#define convert_acpiid_to_cpu(acpi_id) (-1)
#else
#ifdef CONFIG_IA64
@@ -401,7 +401,7 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
#define ARCH_BAD_APICID (0xff)
#endif
-static u8 convert_acpiid_to_cpu(u8 acpi_id)
+static int convert_acpiid_to_cpu(u8 acpi_id)
{
u16 apic_id;
int i;
@@ -427,7 +427,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
acpi_status status = 0;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
- u8 cpu_index;
+ int cpu_index;
static int cpu0_initialized;
ACPI_FUNCTION_TRACE("acpi_processor_get_info");
@@ -473,7 +473,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
cpu_index = convert_acpiid_to_cpu(pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
- if (!cpu0_initialized && (cpu_index == 0xff) &&
+ if (!cpu0_initialized && (cpu_index == -1) &&
(num_online_cpus() == 1)) {
cpu_index = 0;
}
@@ -487,7 +487,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
* less than the max # of CPUs. They should be ignored _iff
* they are physically not present.
*/
- if (cpu_index >= NR_CPUS) {
+ if (cpu_index == -1) {
if (ACPI_FAILURE
(acpi_processor_hotadd_init(pr->handle, &pr->id))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
@@ -558,8 +558,8 @@ static int acpi_processor_start(struct acpi_device *device)
*/
if (processor_device_array[pr->id] != NULL &&
processor_device_array[pr->id] != (void *)device) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "BIOS reporting wrong ACPI id"
- "for the processor\n"));
+ printk(KERN_WARNING "BIOS reported wrong ACPI id"
+ "for the processor\n");
return_VALUE(-ENODEV);
}
processor_device_array[pr->id] = (void *)device;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 80fa43471f48..3b97a5eae9e8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -54,10 +54,10 @@ ACPI_MODULE_NAME("acpi_processor")
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
-static void (*pm_idle_save) (void);
+static void (*pm_idle_save) (void) __read_mostly;
module_param(max_cstate, uint, 0644);
-static unsigned int nocst = 0;
+static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
/*
@@ -67,7 +67,7 @@ module_param(nocst, uint, 0000);
* 100 HZ: 0x0000000F: 4 jiffies = 40ms
* reduce history for more aggressive entry into C3
*/
-static unsigned int bm_history =
+static unsigned int bm_history __read_mostly =
(HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
/* --------------------------------------------------------------------------
@@ -1081,7 +1081,7 @@ int acpi_processor_power_init(struct acpi_processor *pr,
struct acpi_device *device)
{
acpi_status status = 0;
- static int first_run = 0;
+ static int first_run;
struct proc_dir_entry *entry = NULL;
unsigned int i;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index f36db22ce1ae..41aaaba74b19 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -34,6 +34,7 @@
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#endif
@@ -48,7 +49,7 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")
-static DECLARE_MUTEX(performance_sem);
+static DEFINE_MUTEX(performance_mutex);
/*
* _PPC support is implemented as a CPUfreq policy notifier:
@@ -72,7 +73,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
struct acpi_processor *pr;
unsigned int ppc = 0;
- down(&performance_sem);
+ mutex_lock(&performance_mutex);
if (event != CPUFREQ_INCOMPATIBLE)
goto out;
@@ -93,7 +94,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
core_frequency * 1000);
out:
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return 0;
}
@@ -553,6 +554,230 @@ static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
}
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
+static int acpi_processor_get_psd(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
+ struct acpi_buffer state = {0, NULL};
+ union acpi_object *psd = NULL;
+ struct acpi_psd_package *pdomain;
+
+ status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return -ENODEV;
+ }
+
+ psd = (union acpi_object *) buffer.pointer;
+ if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (psd->package.count != 1) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ pdomain = &(pr->performance->domain_info);
+
+ state.length = sizeof(struct acpi_psd_package);
+ state.pointer = pdomain;
+
+ status = acpi_extract_package(&(psd->package.elements[0]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+end:
+ acpi_os_free(buffer.pointer);
+ return result;
+}
+
+int acpi_processor_preregister_performance(
+ struct acpi_processor_performance **performance)
+{
+ int count, count_target;
+ int retval = 0;
+ unsigned int i, j;
+ cpumask_t covered_cpus;
+ struct acpi_processor *pr;
+ struct acpi_psd_package *pdomain;
+ struct acpi_processor *match_pr;
+ struct acpi_psd_package *match_pdomain;
+
+ mutex_lock(&performance_mutex);
+
+ retval = 0;
+
+ /* Call _PSD for all CPUs */
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr) {
+ /* Look only at processors in ACPI namespace */
+ continue;
+ }
+
+ if (pr->performance) {
+ retval = -EBUSY;
+ continue;
+ }
+
+ if (!performance || !performance[i]) {
+ retval = -EINVAL;
+ continue;
+ }
+
+ pr->performance = performance[i];
+ cpu_set(i, pr->performance->shared_cpu_map);
+ if (acpi_processor_get_psd(pr)) {
+ retval = -EINVAL;
+ continue;
+ }
+ }
+ if (retval)
+ goto err_ret;
+
+ /*
+ * Now that we have _PSD data from all CPUs, lets setup P-state
+ * domain info.
+ */
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ /* Basic validity check for domain info */
+ pdomain = &(pr->performance->domain_info);
+ if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
+ (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+ if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+ }
+
+ cpus_clear(covered_cpus);
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ if (cpu_isset(i, covered_cpus))
+ continue;
+
+ pdomain = &(pr->performance->domain_info);
+ cpu_set(i, pr->performance->shared_cpu_map);
+ cpu_set(i, covered_cpus);
+ if (pdomain->num_processors <= 1)
+ continue;
+
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ count = 1;
+ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
+ pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) {
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ } else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) {
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ }
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pdomain = &(match_pr->performance->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /* Here i and j are in the same domain */
+
+ if (match_pdomain->num_processors != count_target) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ if (pdomain->coord_type != match_pdomain->coord_type) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ cpu_set(j, covered_cpus);
+ cpu_set(j, pr->performance->shared_cpu_map);
+ count++;
+ }
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pdomain = &(match_pr->performance->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ match_pr->performance->shared_type =
+ pr->performance->shared_type;
+ match_pr->performance->shared_cpu_map =
+ pr->performance->shared_cpu_map;
+ }
+ }
+
+err_ret:
+ if (retval) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
+ }
+
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr || !pr->performance)
+ continue;
+
+ /* Assume no coordination on any error parsing domain info */
+ if (retval) {
+ cpus_clear(pr->performance->shared_cpu_map);
+ cpu_set(i, pr->performance->shared_cpu_map);
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ }
+ pr->performance = NULL; /* Will be set for real in register */
+ }
+
+ mutex_unlock(&performance_mutex);
+ return retval;
+}
+EXPORT_SYMBOL(acpi_processor_preregister_performance);
+
+
int
acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu)
@@ -564,16 +789,16 @@ acpi_processor_register_performance(struct acpi_processor_performance
if (!(acpi_processor_ppc_status & PPC_REGISTERED))
return_VALUE(-EINVAL);
- down(&performance_sem);
+ mutex_lock(&performance_mutex);
pr = processors[cpu];
if (!pr) {
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(-ENODEV);
}
if (pr->performance) {
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(-EBUSY);
}
@@ -583,13 +808,13 @@ acpi_processor_register_performance(struct acpi_processor_performance
if (acpi_processor_get_performance_info(pr)) {
pr->performance = NULL;
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(-EIO);
}
acpi_cpufreq_add_file(pr);
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(0);
}
@@ -603,11 +828,11 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");
- down(&performance_sem);
+ mutex_lock(&performance_mutex);
pr = processors[cpu];
if (!pr) {
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VOID;
}
@@ -617,7 +842,7 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
acpi_cpufreq_remove_file(pr);
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VOID;
}
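
The preregister routine added above walks the per-CPU _PSD data twice: one pass validates each domain descriptor, a second groups CPUs reporting the same domain into a shared map and propagates that map to every member. Below is a minimal standalone sketch of that grouping logic; the psd_info struct and the fixed four-CPU sample are invented for illustration and are not the kernel's types.

/* Two-pass _PSD-style grouping sketch (illustrative only). */
#include <stdio.h>

#define NCPUS 4

struct psd_info {
    int domain;          /* _PSD domain number */
    int num_processors;  /* expected members in this domain */
    int coord_type;      /* e.g. SW_ALL / SW_ANY / HW_ALL */
};

int main(void)
{
    struct psd_info cpu[NCPUS] = {
        {0, 2, 0xFC}, {0, 2, 0xFC}, {1, 2, 0xFD}, {1, 2, 0xFD},
    };
    unsigned covered = 0;              /* bitmask of CPUs already grouped */
    unsigned shared_map[NCPUS] = {0};  /* per-CPU sibling mask */

    for (int i = 0; i < NCPUS; i++) {
        if (covered & (1u << i))
            continue;
        covered |= 1u << i;
        shared_map[i] = 1u << i;

        /* Pull every other CPU in the same domain into i's map, checking
         * that the domain is described consistently by each member. */
        int count = 1;
        for (int j = 0; j < NCPUS; j++) {
            if (j == i || cpu[j].domain != cpu[i].domain)
                continue;
            if (cpu[j].num_processors != cpu[i].num_processors ||
                cpu[j].coord_type != cpu[i].coord_type)
                return 1;              /* inconsistent _PSD data */
            covered |= 1u << j;
            shared_map[i] |= 1u << j;
            count++;
        }

        /* Second pass: propagate the finished map to every member. */
        for (int j = 0; j < NCPUS; j++)
            if (cpu[j].domain == cpu[i].domain)
                shared_map[j] = shared_map[i];

        printf("domain %d: %d cpus, map 0x%x\n",
               cpu[i].domain, count, shared_map[i]);
    }
    return 0;
}
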
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 4038dbfa63a0..cf87b0230026 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -78,6 +78,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
ACPI_FUNCTION_ENTRY();
for (bits_set = 0; bit_field; bits_set++) {
+
/* Zero the least significant bit that is set */
bit_field &= (bit_field - 1);
@@ -154,15 +155,18 @@ acpi_rs_stream_option_length(u32 resource_length,
* length, minus one byte for the resource_source_index itself.
*/
if (resource_length > minimum_aml_resource_length) {
+
/* Compute the length of the optional string */
string_length =
resource_length - minimum_aml_resource_length - 1;
}
- /* Round up length to 32 bits for internal structure alignment */
-
- return (ACPI_ROUND_UP_to_32_bITS(string_length));
+ /*
+ * Round the length up to a multiple of the native word in order to
+ * guarantee that the entire resource descriptor is native word aligned
+ */
+ return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
}
/*******************************************************************************
@@ -186,11 +190,12 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
acpi_size aml_size_needed = 0;
acpi_rs_length total_size;
- ACPI_FUNCTION_TRACE("rs_get_aml_length");
+ ACPI_FUNCTION_TRACE(rs_get_aml_length);
/* Traverse entire list of internal resource descriptors */
while (resource) {
+
/* Validate the descriptor type */
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
@@ -214,6 +219,7 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
* is a Large Resource data type.
*/
if (resource->data.vendor.byte_length > 7) {
+
/* Base size of a Large resource descriptor */
total_size =
@@ -332,20 +338,22 @@ acpi_rs_get_list_length(u8 * aml_buffer,
acpi_status status;
u8 *end_aml;
u8 *buffer;
- u32 buffer_size = 0;
+ u32 buffer_size;
u16 temp16;
u16 resource_length;
u32 extra_struct_bytes;
u8 resource_index;
u8 minimum_aml_resource_length;
- ACPI_FUNCTION_TRACE("rs_get_list_length");
+ ACPI_FUNCTION_TRACE(rs_get_list_length);
+ *size_needed = 0;
end_aml = aml_buffer + aml_buffer_length;
/* Walk the list of AML resource descriptors */
while (aml_buffer < end_aml) {
+
/* Validate the Resource Type and Resource Length */
status = acpi_ut_validate_resource(aml_buffer, &resource_index);
@@ -386,35 +394,28 @@ acpi_rs_get_list_length(u8 * aml_buffer,
break;
case ACPI_RESOURCE_NAME_VENDOR_SMALL:
+ case ACPI_RESOURCE_NAME_VENDOR_LARGE:
/*
* Vendor Resource:
- * Ensure a 32-bit boundary for the structure
+ * Get the number of vendor data bytes
*/
- extra_struct_bytes =
- ACPI_ROUND_UP_to_32_bITS(resource_length);
+ extra_struct_bytes = resource_length;
break;
case ACPI_RESOURCE_NAME_END_TAG:
/*
- * End Tag: This is the normal exit, add size of end_tag
+ * End Tag:
+ * This is the normal exit, add size of end_tag
*/
- *size_needed = buffer_size + ACPI_RS_SIZE_MIN;
+ *size_needed += ACPI_RS_SIZE_MIN;
return_ACPI_STATUS(AE_OK);
- case ACPI_RESOURCE_NAME_VENDOR_LARGE:
- /*
- * Vendor Resource:
- * Add vendor data and ensure a 32-bit boundary for the structure
- */
- extra_struct_bytes =
- ACPI_ROUND_UP_to_32_bITS(resource_length);
- break;
-
case ACPI_RESOURCE_NAME_ADDRESS32:
case ACPI_RESOURCE_NAME_ADDRESS16:
+ case ACPI_RESOURCE_NAME_ADDRESS64:
/*
- * 32-Bit or 16-bit Address Resource:
- * Add the size of any optional data (resource_source)
+ * Address Resource:
+ * Add the size of the optional resource_source
*/
extra_struct_bytes =
acpi_rs_stream_option_length(resource_length,
@@ -423,50 +424,46 @@ acpi_rs_get_list_length(u8 * aml_buffer,
case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
/*
- * Extended IRQ:
- * Point past the interrupt_vector_flags to get the
- * interrupt_table_length.
+ * Extended IRQ Resource:
+ * Using the interrupt_table_length, add 4 bytes for each additional
+ * interrupt. Note: at least one interrupt is required and is
+ * included in the minimum descriptor size (reason for the -1)
*/
- buffer++;
+ extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);
- extra_struct_bytes =
- /*
- * Add 4 bytes for each additional interrupt. Note: at
- * least one interrupt is required and is included in
- * the minimum descriptor size
- */
- ((*buffer - 1) * sizeof(u32)) +
- /* Add the size of any optional data (resource_source) */
+ /* Add the size of the optional resource_source */
+
+ extra_struct_bytes +=
acpi_rs_stream_option_length(resource_length -
extra_struct_bytes,
minimum_aml_resource_length);
break;
- case ACPI_RESOURCE_NAME_ADDRESS64:
- /*
- * 64-Bit Address Resource:
- * Add the size of any optional data (resource_source)
- * Ensure a 64-bit boundary for the structure
- */
- extra_struct_bytes =
- ACPI_ROUND_UP_to_64_bITS
- (acpi_rs_stream_option_length
- (resource_length, minimum_aml_resource_length));
- break;
-
default:
break;
}
- /* Update the required buffer size for the internal descriptor structs */
+ /*
+ * Update the required buffer size for the internal descriptor structs
+ *
+ * Important: Round the size up for the appropriate alignment. This
+ * is a requirement on IA64.
+ */
+ buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
+ extra_struct_bytes;
+ buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
- temp16 = (u16) (acpi_gbl_resource_struct_sizes[resource_index] +
- extra_struct_bytes);
- buffer_size += (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(temp16);
+ *size_needed += buffer_size;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+ "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+ acpi_ut_get_resource_type(aml_buffer),
+ acpi_ut_get_descriptor_length(aml_buffer),
+ buffer_size));
/*
- * Point to the next resource within the stream
- * using the size of the header plus the length contained in the header
+ * Point to the next resource within the AML stream using the length
+ * contained in the resource descriptor header
*/
aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
}
@@ -506,7 +503,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
u8 name_found;
u32 table_index;
- ACPI_FUNCTION_TRACE("rs_get_pci_routing_table_length");
+ ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);
number_of_elements = package_object->package.count;
@@ -523,6 +520,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
top_object_list = package_object->package.elements;
for (index = 0; index < number_of_elements; index++) {
+
/* Dereference the sub-package */
package_element = *top_object_list;
@@ -581,7 +579,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
/* Round up the size since each element must be aligned */
- temp_size_needed = ACPI_ROUND_UP_to_64_bITS(temp_size_needed);
+ temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);
/* Point to the next union acpi_operand_object */
@@ -589,7 +587,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
}
/*
- * Adding an extra element to the end of the list, essentially a
+ * Add an extra element to the end of the list, essentially a
* NULL terminator
*/
*buffer_size_needed =
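
The recurring change in rscalc.c is replacing 32-bit rounding with rounding to the native word, so internal resource structures stay aligned on 64-bit targets such as IA64. A small self-contained sketch of the arithmetic follows, using a generic round-up macro rather than the ACPICA definition:

/* Generic power-of-two round-up, in the spirit of the native-word rounding
 * used above; not the ACPICA macro itself. */
#include <stdio.h>
#include <stddef.h>

#define ROUND_UP(value, boundary) \
    (((value) + ((boundary) - 1)) & ~((size_t)(boundary) - 1))

int main(void)
{
    size_t lengths[] = { 1, 13, 31, 33 };

    for (size_t i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
        size_t len = lengths[i];
        printf("len %2zu -> 32-bit: %2zu, native (%zu-byte) word: %2zu\n",
               len,
               ROUND_UP(len, 4),               /* old behaviour: 32-bit units */
               sizeof(void *),
               ROUND_UP(len, sizeof(void *))); /* new: native word, 8 on 64-bit */
    }
    return 0;
}
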
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 8c128dea3252..008058acdd39 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -75,10 +75,11 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
u8 *aml_start;
acpi_size list_size_needed = 0;
u32 aml_buffer_length;
+ void *resource;
- ACPI_FUNCTION_TRACE("rs_create_resource_list");
+ ACPI_FUNCTION_TRACE(rs_create_resource_list);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "aml_buffer = %p\n", aml_buffer));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlBuffer = %p\n", aml_buffer));
/* Params already validated, so we don't re-validate here */
@@ -92,7 +93,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
status = acpi_rs_get_list_length(aml_start, aml_buffer_length,
&list_size_needed);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X list_size_needed=%X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X ListSizeNeeded=%X\n",
status, (u32) list_size_needed));
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -107,13 +108,15 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
/* Do the conversion */
- status = acpi_rs_convert_aml_to_resources(aml_start, aml_buffer_length,
- output_buffer->pointer);
+ resource = output_buffer->pointer;
+ status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
+ acpi_rs_convert_aml_to_resources,
+ &resource);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "output_buffer %p Length %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
output_buffer->pointer, (u32) output_buffer->length));
return_ACPI_STATUS(AE_OK);
}
@@ -155,7 +158,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
acpi_status status;
struct acpi_buffer path_buffer;
- ACPI_FUNCTION_TRACE("rs_create_pci_routing_table");
+ ACPI_FUNCTION_TRACE(rs_create_pci_routing_table);
/* Params already validated, so we don't re-validate here */
@@ -167,7 +170,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
return_ACPI_STATUS(status);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "buffer_size_needed = %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "BufferSizeNeeded = %X\n",
(u32) buffer_size_needed));
/* Validate/Allocate/Clear caller buffer */
@@ -332,7 +335,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* Now align the current length */
user_prt->length =
- (u32) ACPI_ROUND_UP_to_64_bITS(user_prt->length);
+ (u32) ACPI_ROUND_UP_TO_64BIT(user_prt->length);
/* 4) Fourth subobject: Dereference the PRT.source_index */
@@ -341,7 +344,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
user_prt->source_index = (u32) obj_desc->integer.value;
} else {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].source_index) Need Integer, found %s",
+ "(PRT[%X].SourceIndex) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -352,7 +355,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
top_object_list++;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "output_buffer %p Length %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
output_buffer->pointer, (u32) output_buffer->length));
return_ACPI_STATUS(AE_OK);
}
@@ -382,9 +385,9 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
acpi_status status;
acpi_size aml_size_needed = 0;
- ACPI_FUNCTION_TRACE("rs_create_aml_resources");
+ ACPI_FUNCTION_TRACE(rs_create_aml_resources);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "linked_list_buffer = %p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n",
linked_list_buffer));
/*
@@ -395,7 +398,7 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
*/
status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "aml_size_needed=%X, %s\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
(u32) aml_size_needed,
acpi_format_exception(status)));
if (ACPI_FAILURE(status)) {
@@ -419,7 +422,7 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
return_ACPI_STATUS(status);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "output_buffer %p Length %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
output_buffer->pointer, (u32) output_buffer->length));
return_ACPI_STATUS(AE_OK);
}
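
rscreate.c keeps the two-phase shape seen above: rscalc.c sizes the output first, the caller's buffer is validated or allocated, and only then is the AML stream converted into it. A simplified sketch of that size-then-fill pattern, with invented record types:

/* Size-then-convert sketch (illustrative only). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct record { unsigned char len; const char *payload; };

static size_t list_length(const struct record *r, size_t n)
{
    size_t total = 0;
    for (size_t i = 0; i < n; i++)
        total += r[i].len;
    return total;
}

int main(void)
{
    struct record recs[] = { {3, "abc"}, {5, "defgh"} };
    size_t n = sizeof(recs) / sizeof(recs[0]);

    /* Phase 1: compute the required output length (rscalc.c's role). */
    size_t needed = list_length(recs, n);

    /* Phase 2: allocate the output and do the conversion (rscreate.c's role). */
    char *out = malloc(needed + 1);
    if (!out)
        return 1;
    char *p = out;
    for (size_t i = 0; i < n; i++) {
        memcpy(p, recs[i].payload, recs[i].len);
        p += recs[i].len;
    }
    *p = '\0';

    printf("needed=%zu, out=\"%s\"\n", needed, out);
    free(out);
    return 0;
}
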
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index e7de061cf883..9c99a723a860 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -91,11 +91,11 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
struct acpi_rsdump_info acpi_rs_dump_irq[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
- acpi_gbl_HEdecode},
+ acpi_gbl_he_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
- acpi_gbl_LLdecode},
+ acpi_gbl_ll_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
- acpi_gbl_SHRdecode},
+ acpi_gbl_shr_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
"Interrupt Count", NULL},
{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
@@ -105,11 +105,11 @@ struct acpi_rsdump_info acpi_rs_dump_irq[6] = {
struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
- acpi_gbl_TYPdecode},
+ acpi_gbl_typ_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
- acpi_gbl_BMdecode},
+ acpi_gbl_bm_decode},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
- acpi_gbl_SIZdecode},
+ acpi_gbl_siz_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
NULL},
{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
@@ -158,7 +158,7 @@ struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
};
struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "end_tag",
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
NULL}
};
@@ -166,7 +166,7 @@ struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
"24-Bit Memory Range", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
NULL},
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
@@ -181,7 +181,7 @@ struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
"32-Bit Memory Range", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
NULL},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
@@ -196,7 +196,7 @@ struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
"32-Bit Fixed Memory Range", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
NULL},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
@@ -278,11 +278,11 @@ struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
"Type", acpi_gbl_consume_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
- "Triggering", acpi_gbl_HEdecode},
+ "Triggering", acpi_gbl_he_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
- acpi_gbl_LLdecode},
+ acpi_gbl_ll_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
- acpi_gbl_SHRdecode},
+ acpi_gbl_shr_decode},
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
NULL},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
@@ -314,7 +314,7 @@ static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
"Consumer/Producer", acpi_gbl_consume_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
- acpi_gbl_DECdecode},
+ acpi_gbl_dec_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
"Min Relocatability", acpi_gbl_min_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
@@ -325,24 +325,24 @@ static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
"Resource Type", (void *)"Memory Range"},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
- "Caching", acpi_gbl_MEMdecode},
+ "Caching", acpi_gbl_mem_decode},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
- "Range Type", acpi_gbl_MTPdecode},
+ "Range Type", acpi_gbl_mtp_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
- "Translation", acpi_gbl_TTPdecode}
+ "Translation", acpi_gbl_ttp_decode}
};
static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
"Resource Type", (void *)"I/O Range"},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
- "Range Type", acpi_gbl_RNGdecode},
+ "Range Type", acpi_gbl_rng_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
- "Translation", acpi_gbl_TTPdecode},
+ "Translation", acpi_gbl_ttp_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
- "Translation Type", acpi_gbl_TRSdecode}
+ "Translation Type", acpi_gbl_trs_decode}
};
/*
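
The rsdump.c hunks only rename the decode-string globals, but the tables they touch illustrate a compact technique: each dump entry pairs a field offset with a label so one generic routine can print any resource type. A standalone sketch of that offset-table approach, with an invented struct and table:

/* Offset-table dump sketch (illustrative only). */
#include <stdio.h>
#include <stddef.h>

struct irq_resource {
    unsigned char triggering;
    unsigned char polarity;
    unsigned char sharable;
};

struct dump_entry {
    size_t offset;       /* offset of the field inside the resource struct */
    const char *name;    /* label printed next to the value */
};

static const struct dump_entry irq_dump[] = {
    { offsetof(struct irq_resource, triggering), "Triggering" },
    { offsetof(struct irq_resource, polarity),   "Polarity"   },
    { offsetof(struct irq_resource, sharable),   "Sharing"    },
};

static void dump(const void *resource, const struct dump_entry *table, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        const unsigned char *field =
            (const unsigned char *)resource + table[i].offset;
        printf("%-10s : %u\n", table[i].name, (unsigned)*field);
    }
}

int main(void)
{
    struct irq_resource irq = { 1, 0, 1 };
    dump(&irq, irq_dump, sizeof(irq_dump) / sizeof(irq_dump[0]));
    return 0;
}
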
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index d9ae64b77bd9..9e7ae2f8a1d3 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -141,6 +141,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
};
#endif
+
#endif /* ACPI_FUTURE_USAGE */
/*
* Base sizes for external AML resource descriptors, indexed by internal type.
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index 1434e786477e..29423ce030ca 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -51,76 +51,62 @@ ACPI_MODULE_NAME("rslist")
*
* FUNCTION: acpi_rs_convert_aml_to_resources
*
- * PARAMETERS: Aml - Pointer to the resource byte stream
- * aml_length - Length of Aml
- * output_buffer - Pointer to the buffer that will
- * contain the output structures
+ * PARAMETERS: acpi_walk_aml_callback
+ * resource_ptr - Pointer to the buffer that will
+ * contain the output structures
*
* RETURN: Status
*
- * DESCRIPTION: Takes the resource byte stream and parses it, creating a
- * linked list of resources in the caller's output buffer
+ * DESCRIPTION: Convert an AML resource to an internal representation of the
+ * resource that is aligned and easier to access.
*
******************************************************************************/
acpi_status
-acpi_rs_convert_aml_to_resources(u8 * aml, u32 aml_length, u8 * output_buffer)
+acpi_rs_convert_aml_to_resources(u8 * aml,
+ u32 length,
+ u32 offset, u8 resource_index, void **context)
{
- struct acpi_resource *resource = (void *)output_buffer;
+ struct acpi_resource **resource_ptr =
+ ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
+ struct acpi_resource *resource;
acpi_status status;
- u8 resource_index;
- u8 *end_aml;
-
- ACPI_FUNCTION_TRACE("rs_convert_aml_to_resources");
-
- end_aml = aml + aml_length;
-
- /* Loop until end-of-buffer or an end_tag is found */
-
- while (aml < end_aml) {
- /* Validate the Resource Type and Resource Length */
-
- status = acpi_ut_validate_resource(aml, &resource_index);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- /* Convert the AML byte stream resource to a local resource struct */
-
- status =
- acpi_rs_convert_aml_to_resource(resource,
- ACPI_CAST_PTR(union
- aml_resource,
- aml),
- acpi_gbl_get_resource_dispatch
- [resource_index]);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert AML resource (Type %X)",
- *aml));
- return_ACPI_STATUS(status);
- }
+ ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
- /* Normal exit on completion of an end_tag resource descriptor */
-
- if (acpi_ut_get_resource_type(aml) ==
- ACPI_RESOURCE_NAME_END_TAG) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Point to the next input AML resource */
-
- aml += acpi_ut_get_descriptor_length(aml);
-
- /* Point to the next structure in the output buffer */
+ /*
+ * Check that the input buffer and all subsequent pointers into it
+ * are aligned on a native word boundary. Most important on IA64
+ */
+ resource = *resource_ptr;
+ if (ACPI_IS_MISALIGNED(resource)) {
+ ACPI_WARNING((AE_INFO,
+ "Misaligned resource pointer %p", resource));
+ }
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
+ /* Convert the AML byte stream resource to a local resource struct */
+
+ status =
+ acpi_rs_convert_aml_to_resource(resource,
+ ACPI_CAST_PTR(union aml_resource,
+ aml),
+ acpi_gbl_get_resource_dispatch
+ [resource_index]);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not convert AML resource (Type %X)",
+ *aml));
+ return_ACPI_STATUS(status);
}
- /* Did not find an end_tag resource descriptor */
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+ "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+ acpi_ut_get_resource_type(aml), length,
+ resource->length));
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ /* Point to the next structure in the output buffer */
+
+ *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -150,11 +136,12 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
u8 *end_aml = output_buffer + aml_size_needed;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_convert_resources_to_aml");
+ ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
/* Walk the resource descriptor list, convert each descriptor */
while (aml < end_aml) {
+
/* Validate the (internal) Resource Type */
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
@@ -191,6 +178,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Check for end-of-list, normal exit */
if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+
/* An End Tag indicates the end of the input Resource Template */
return_ACPI_STATUS(AE_OK);
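
rslist.c turns the old self-contained loop into a per-descriptor callback driven by a generic walker, with the output cursor threaded through a context pointer. A simplified sketch of that walker/callback split; the descriptor format and names are invented for the example:

/* Walker + callback sketch (illustrative only). */
#include <stdio.h>

typedef int (*walk_cb)(const unsigned char *desc, unsigned len, void **context);

/* Walker: validates and advances through the byte stream, calling the
 * callback once per descriptor (analogous in shape to a generic AML walk). */
static int walk(const unsigned char *buf, unsigned buf_len,
                walk_cb cb, void **context)
{
    unsigned offset = 0;
    while (offset < buf_len) {
        unsigned len = buf[offset];          /* first byte = descriptor length */
        if (len == 0 || offset + len > buf_len)
            return -1;                       /* malformed stream */
        int rc = cb(buf + offset, len, context);
        if (rc)
            return rc;
        offset += len;
    }
    return 0;
}

/* Callback: converts one descriptor and advances the caller's cursor, the
 * way the reworked conversion routine above advances *resource_ptr. */
static int convert_one(const unsigned char *desc, unsigned len, void **context)
{
    unsigned *out = *context;
    (void)desc;
    *out = len;              /* "conversion": record the descriptor length */
    *context = out + 1;      /* point at the next output slot */
    return 0;
}

int main(void)
{
    unsigned char stream[] = { 3, 0xAA, 0xBB, 2, 0xCC, 4, 1, 2, 3 };
    unsigned out[8] = { 0 };
    void *cursor = out;

    if (walk(stream, sizeof(stream), convert_one, &cursor))
        return 1;
    for (unsigned i = 0; out[i]; i++)
        printf("descriptor %u: %u bytes\n", i, out[i]);
    return 0;
}
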
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index ed866cf1c6d2..faf6e106b785 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -81,9 +81,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
u16 item_count = 0;
u16 temp16 = 0;
- ACPI_FUNCTION_TRACE("rs_get_resource");
+ ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
if (((acpi_native_uint) resource) & 0x3) {
+
/* Each internal resource struct is expected to be 32-bit aligned */
ACPI_WARNING((AE_INFO,
@@ -295,9 +296,11 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
exit:
if (!flags_mode) {
- /* Round the resource struct length up to the next 32-bit boundary */
- resource->length = ACPI_ROUND_UP_to_32_bITS(resource->length);
+ /* Round the resource struct length up to the next boundary (32 or 64) */
+
+ resource->length =
+ (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length);
}
return_ACPI_STATUS(AE_OK);
}
@@ -329,7 +332,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
u16 temp16 = 0;
u16 item_count = 0;
- ACPI_FUNCTION_TRACE("rs_convert_resource_to_aml");
+ ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
/*
* First table entry must be ACPI_RSC_INITxxx and must contain the
@@ -535,6 +538,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
resource->data.extended_irq.interrupt_count = temp8;
if (temp8 < 1) {
+
/* Must have at least one IRQ */
return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index 25b5aedd6612..a9cbee8e8b44 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -205,6 +205,7 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
/* Length is stored differently for large and small descriptors */
if (aml->small_header.descriptor_type & ACPI_RESOURCE_NAME_LARGE) {
+
/* Large descriptor -- bytes 1-2 contain the 16-bit length */
ACPI_MOVE_16_TO_16(&aml->large_header.resource_length,
@@ -298,7 +299,8 @@ static u16 acpi_rs_strcpy(char *destination, char *source)
* string_ptr - (optional) where to store the actual
* resource_source string
*
- * RETURN: Length of the string plus NULL terminator, rounded up to 32 bit
+ * RETURN: Length of the string plus NULL terminator, rounded up to native
+ * word boundary
*
* DESCRIPTION: Copy the optional resource_source data from a raw AML descriptor
* to an internal resource descriptor
@@ -328,6 +330,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
* we add 1 to the minimum length.
*/
if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
+
/* Get the resource_source_index */
resource_source->index = aml_resource_source[0];
@@ -344,23 +347,26 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
}
/*
- * In order for the struct_size to fall on a 32-bit boundary, calculate
- * the length of the string (+1 for the NULL terminator) and expand the
- * struct_size to the next 32-bit boundary.
+ * In order for the Resource length to be a multiple of the native
+ * word, calculate the length of the string (+1 for NULL terminator)
+ * and expand to the next word multiple.
*
* Zero the entire area of the buffer.
*/
total_length =
- ACPI_ROUND_UP_to_32_bITS(ACPI_STRLEN
- ((char *)&aml_resource_source[1]) +
- 1);
+ (u32)
+ ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
+ 1;
+ total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
+
ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
/* Copy the resource_source string to the destination */
resource_source->string_length =
acpi_rs_strcpy(resource_source->string_ptr,
- (char *)&aml_resource_source[1]);
+ ACPI_CAST_PTR(char,
+ &aml_resource_source[1]));
return ((acpi_rs_length) total_length);
}
@@ -405,6 +411,7 @@ acpi_rs_set_resource_source(union aml_resource * aml,
/* Non-zero string length indicates presence of a resource_source */
if (resource_source->string_length) {
+
/* Point to the end of the AML descriptor */
aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
@@ -415,7 +422,7 @@ acpi_rs_set_resource_source(union aml_resource * aml,
/* Copy the resource_source string */
- ACPI_STRCPY((char *)&aml_resource_source[1],
+ ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
resource_source->string_ptr);
/*
@@ -435,9 +442,9 @@ acpi_rs_set_resource_source(union aml_resource * aml,
*
* FUNCTION: acpi_rs_get_prt_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -450,18 +457,19 @@ acpi_rs_set_resource_source(union aml_resource * aml,
******************************************************************************/
acpi_status
-acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer * ret_buffer)
+acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
+ struct acpi_buffer * ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_prt_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_prt_method_data);
/* Parameters guaranteed valid by caller */
/* Execute the method, no parameters */
- status = acpi_ut_evaluate_object(handle, METHOD_NAME__PRT,
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__PRT,
ACPI_BTYPE_PACKAGE, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -483,9 +491,9 @@ acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer * ret_buffer)
*
* FUNCTION: acpi_rs_get_crs_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -498,18 +506,19 @@ acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer * ret_buffer)
******************************************************************************/
acpi_status
-acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
+acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_crs_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_crs_method_data);
/* Parameters guaranteed valid by caller */
/* Execute the method, no parameters */
- status = acpi_ut_evaluate_object(handle, METHOD_NAME__CRS,
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__CRS,
ACPI_BTYPE_BUFFER, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -522,7 +531,7 @@ acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*/
status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
- /* on exit, we must delete the object returned by evaluate_object */
+ /* On exit, we must delete the object returned by evaluate_object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
@@ -532,9 +541,9 @@ acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*
* FUNCTION: acpi_rs_get_prs_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -548,18 +557,19 @@ acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
+acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_prs_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_prs_method_data);
/* Parameters guaranteed valid by caller */
/* Execute the method, no parameters */
- status = acpi_ut_evaluate_object(handle, METHOD_NAME__PRS,
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__PRS,
ACPI_BTYPE_BUFFER, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -572,7 +582,7 @@ acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*/
status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
- /* on exit, we must delete the object returned by evaluate_object */
+ /* On exit, we must delete the object returned by evaluate_object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
@@ -583,10 +593,10 @@ acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*
* FUNCTION: acpi_rs_get_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
+ * PARAMETERS: Handle - Handle to the containing object
* Path - Path to method, relative to Handle
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -605,7 +615,7 @@ acpi_rs_get_method_data(acpi_handle handle,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_method_data);
/* Parameters guaranteed valid by caller */
@@ -634,9 +644,9 @@ acpi_rs_get_method_data(acpi_handle handle,
*
* FUNCTION: acpi_rs_set_srs_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * in_buffer - a pointer to a buffer structure of the
- * parameter
+ * PARAMETERS: Node - Device node
+ * in_buffer - Pointer to a buffer structure of the
+ * parameter
*
* RETURN: Status
*
@@ -646,23 +656,37 @@ acpi_rs_get_method_data(acpi_handle handle,
* If the function fails an appropriate status will be returned
* and the contents of the callers buffer is undefined.
*
+ * Note: Parameters guaranteed valid by caller
+ *
******************************************************************************/
acpi_status
-acpi_rs_set_srs_method_data(acpi_handle handle, struct acpi_buffer *in_buffer)
+acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *in_buffer)
{
- struct acpi_parameter_info info;
- union acpi_operand_object *params[2];
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[2];
acpi_status status;
struct acpi_buffer buffer;
- ACPI_FUNCTION_TRACE("rs_set_srs_method_data");
+ ACPI_FUNCTION_TRACE(rs_set_srs_method_data);
- /* Parameters guaranteed valid by caller */
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = node;
+ info->pathname = METHOD_NAME__SRS;
+ info->parameters = args;
+ info->parameter_type = ACPI_PARAM_ARGS;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
/*
* The in_buffer parameter will point to a linked list of
- * resource parameters. It needs to be formatted into a
+ * resource parameters. It needs to be formatted into a
* byte stream to be sent in as an input parameter to _SRS
*
* Convert the linked list into a byte stream
@@ -670,41 +694,36 @@ acpi_rs_set_srs_method_data(acpi_handle handle, struct acpi_buffer *in_buffer)
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto cleanup;
}
- /* Init the param object */
+ /* Create and initialize the method parameter object */
- params[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
- if (!params[0]) {
- acpi_os_free(buffer.pointer);
- return_ACPI_STATUS(AE_NO_MEMORY);
+ args[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+ if (!args[0]) {
+ /*
+ * Must free the buffer allocated above (otherwise it is freed
+ * later)
+ */
+ ACPI_FREE(buffer.pointer);
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
- /* Set up the parameter object */
-
- params[0]->buffer.length = (u32) buffer.length;
- params[0]->buffer.pointer = buffer.pointer;
- params[0]->common.flags = AOPOBJ_DATA_VALID;
- params[1] = NULL;
-
- info.node = handle;
- info.parameters = params;
- info.parameter_type = ACPI_PARAM_ARGS;
+ args[0]->buffer.length = (u32) buffer.length;
+ args[0]->buffer.pointer = buffer.pointer;
+ args[0]->common.flags = AOPOBJ_DATA_VALID;
+ args[1] = NULL;
- /* Execute the method, no return value */
+ /* Execute the method, no return value is expected */
- status = acpi_ns_evaluate_relative(METHOD_NAME__SRS, &info);
- if (ACPI_SUCCESS(status)) {
- /* Delete any return object (especially if implicit_return is enabled) */
+ status = acpi_ns_evaluate(info);
- if (info.return_object) {
- acpi_ut_remove_reference(info.return_object);
- }
- }
+ /* Clean up and return the status from acpi_ns_evaluate */
- /* Clean up and return the status from acpi_ns_evaluate_relative */
+ acpi_ut_remove_reference(args[0]);
- acpi_ut_remove_reference(params[0]);
+ cleanup:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
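
The reworked _SRS path above allocates an evaluation info block up front and funnels every failure through a single cleanup label that frees it exactly once. A small sketch of that shape, with an invented eval_info struct and a stubbed evaluate() standing in for the real namespace call:

/* Allocate-info / single-cleanup-path sketch (illustrative only). */
#include <stdio.h>
#include <stdlib.h>

struct eval_info {
    const char *pathname;
    void *arg;
};

static int evaluate(struct eval_info *info)
{
    /* Stub: pretend the control method ran with the given argument. */
    printf("evaluated %s with arg %p\n", info->pathname, info->arg);
    return 0;
}

static int set_method_data(void *arg_buffer)
{
    int status = 0;
    struct eval_info *info = calloc(1, sizeof(*info));
    if (!info)
        return -1;

    info->pathname = "_SRS";
    info->arg = arg_buffer;
    if (!info->arg) {
        status = -1;
        goto cleanup;          /* any failure funnels through one exit */
    }

    status = evaluate(info);

cleanup:
    free(info);                /* the info block is released exactly once */
    return status;
}

int main(void)
{
    char buffer[16] = "resource bytes";
    return set_method_data(buffer);
}
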
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 88b67077aeeb..1999e2ab7daa 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -41,10 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acresrc.h>
+#include <acpi/acnamesp.h>
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsxface")
@@ -68,312 +67,262 @@ ACPI_MODULE_NAME("rsxface")
static acpi_status
acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context);
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+ struct acpi_buffer *buffer,
+ struct acpi_namespace_node **return_node);
+
/*******************************************************************************
*
- * FUNCTION: acpi_get_irq_routing_table
+ * FUNCTION: acpi_rs_validate_parameters
*
- * PARAMETERS: device_handle - a handle to the Bus device we are querying
- * ret_buffer - a pointer to a buffer to receive the
- * current resources for the device
+ * PARAMETERS: device_handle - Handle to a device
+ * Buffer - Pointer to a data buffer
+ * return_node - Pointer to where the device node is returned
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to get the IRQ routing table for a
- * specific bus. The caller must first acquire a handle for the
- * desired bus. The routine table is placed in the buffer pointed
- * to by the ret_buffer variable parameter.
- *
- * If the function fails an appropriate status will be returned
- * and the value of ret_buffer is undefined.
- *
- * This function attempts to execute the _PRT method contained in
- * the object indicated by the passed device_handle.
+ * DESCRIPTION: Common parameter validation for resource interfaces
*
******************************************************************************/
-acpi_status
-acpi_get_irq_routing_table(acpi_handle device_handle,
- struct acpi_buffer *ret_buffer)
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+ struct acpi_buffer *buffer,
+ struct acpi_namespace_node **return_node)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_get_irq_routing_table ");
+ ACPI_FUNCTION_TRACE(rs_validate_parameters);
/*
- * Must have a valid handle and buffer, So we have to have a handle
- * and a return buffer structure, and if there is a non-zero buffer length
- * we also need a valid pointer in the buffer. If it's a zero buffer length,
- * we'll be returning the needed buffer size, so keep going.
+ * Must have a valid handle to an ACPI device
*/
if (!device_handle) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- status = acpi_ut_validate_buffer(ret_buffer);
+ node = acpi_ns_map_handle_to_node(device_handle);
+ if (!node) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (node->type != ACPI_TYPE_DEVICE) {
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /*
+ * Validate the user buffer object
+ *
+ * if there is a non-zero buffer length we also need a valid pointer in
+ * the buffer. If it's a zero buffer length, we'll be returning the
+ * needed buffer size (later), so keep going.
+ */
+ status = acpi_ut_validate_buffer(buffer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- status = acpi_rs_get_prt_method_data(device_handle, ret_buffer);
- return_ACPI_STATUS(status);
+ *return_node = node;
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
- * FUNCTION: acpi_get_current_resources
+ * FUNCTION: acpi_get_irq_routing_table
*
- * PARAMETERS: device_handle - a handle to the device object for the
- * device we are querying
- * ret_buffer - a pointer to a buffer to receive the
+ * PARAMETERS: device_handle - Handle to the Bus device we are querying
+ * ret_buffer - Pointer to a buffer to receive the
* current resources for the device
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to get the current resources for a
- * specific device. The caller must first acquire a handle for
- * the desired device. The resource data is placed in the buffer
- * pointed to by the ret_buffer variable parameter.
+ * DESCRIPTION: This function is called to get the IRQ routing table for a
+ * specific bus. The caller must first acquire a handle for the
+ * desired bus. The routine table is placed in the buffer pointed
+ * to by the ret_buffer variable parameter.
*
* If the function fails an appropriate status will be returned
* and the value of ret_buffer is undefined.
*
- * This function attempts to execute the _CRS method contained in
+ * This function attempts to execute the _PRT method contained in
* the object indicated by the passed device_handle.
*
******************************************************************************/
acpi_status
-acpi_get_current_resources(acpi_handle device_handle,
+acpi_get_irq_routing_table(acpi_handle device_handle,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_get_current_resources");
+ ACPI_FUNCTION_TRACE(acpi_get_irq_routing_table);
- /*
- * Must have a valid handle and buffer, So we have to have a handle
- * and a return buffer structure, and if there is a non-zero buffer length
- * we also need a valid pointer in the buffer. If it's a zero buffer length,
- * we'll be returning the needed buffer size, so keep going.
- */
- if (!device_handle) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ /* Validate parameters then dispatch to internal routine */
- status = acpi_ut_validate_buffer(ret_buffer);
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- status = acpi_rs_get_crs_method_data(device_handle, ret_buffer);
+ status = acpi_rs_get_prt_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_current_resources);
+ACPI_EXPORT_SYMBOL(acpi_get_irq_routing_table)
/*******************************************************************************
*
- * FUNCTION: acpi_get_possible_resources
+ * FUNCTION: acpi_get_current_resources
*
- * PARAMETERS: device_handle - a handle to the device object for the
+ * PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * ret_buffer - a pointer to a buffer to receive the
- * resources for the device
+ * ret_buffer - Pointer to a buffer to receive the
+ * current resources for the device
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to get a list of the possible resources
- * for a specific device. The caller must first acquire a handle
- * for the desired device. The resource data is placed in the
- * buffer pointed to by the ret_buffer variable.
+ * DESCRIPTION: This function is called to get the current resources for a
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is placed in the buffer
+ * pointed to by the ret_buffer variable parameter.
*
* If the function fails an appropriate status will be returned
* and the value of ret_buffer is undefined.
*
+ * This function attempts to execute the _CRS method contained in
+ * the object indicated by the passed device_handle.
+ *
******************************************************************************/
-
-#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_get_possible_resources(acpi_handle device_handle,
- struct acpi_buffer *ret_buffer)
+acpi_get_current_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_get_possible_resources");
+ ACPI_FUNCTION_TRACE(acpi_get_current_resources);
- /*
- * Must have a valid handle and buffer, So we have to have a handle
- * and a return buffer structure, and if there is a non-zero buffer length
- * we also need a valid pointer in the buffer. If it's a zero buffer length,
- * we'll be returning the needed buffer size, so keep going.
- */
- if (!device_handle) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ /* Validate parameters then dispatch to internal routine */
- status = acpi_ut_validate_buffer(ret_buffer);
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- status = acpi_rs_get_prs_method_data(device_handle, ret_buffer);
+ status = acpi_rs_get_crs_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_possible_resources);
-#endif /* ACPI_FUTURE_USAGE */
+ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
+#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
- * FUNCTION: acpi_walk_resources
+ * FUNCTION: acpi_get_possible_resources
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * Name - Method name of the resources we want
- * (METHOD_NAME__CRS or METHOD_NAME__PRS)
- * user_function - Called for each resource
- * Context - Passed to user_function
+ * ret_buffer - Pointer to a buffer to receive the
+ * resources for the device
*
* RETURN: Status
*
- * DESCRIPTION: Retrieves the current or possible resource list for the
- * specified device. The user_function is called once for
- * each resource in the list.
+ * DESCRIPTION: This function is called to get a list of the possible resources
+ * for a specific device. The caller must first acquire a handle
+ * for the desired device. The resource data is placed in the
+ * buffer pointed to by the ret_buffer variable.
+ *
+ * If the function fails an appropriate status will be returned
+ * and the value of ret_buffer is undefined.
*
******************************************************************************/
-
acpi_status
-acpi_walk_resources(acpi_handle device_handle,
- char *name,
- ACPI_WALK_RESOURCE_CALLBACK user_function, void *context)
+acpi_get_possible_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
{
acpi_status status;
- struct acpi_buffer buffer;
- struct acpi_resource *resource;
- struct acpi_resource *resource_end;
-
- ACPI_FUNCTION_TRACE("acpi_walk_resources");
-
- /* Parameter validation */
+ struct acpi_namespace_node *node;
- if (!device_handle || !user_function || !name ||
- (ACPI_STRNCMP(name, METHOD_NAME__CRS, sizeof(METHOD_NAME__CRS)) &&
- ACPI_STRNCMP(name, METHOD_NAME__PRS, sizeof(METHOD_NAME__PRS)))) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ ACPI_FUNCTION_TRACE(acpi_get_possible_resources);
- /* Get the _CRS or _PRS resource list */
+ /* Validate parameters then dispatch to internal routine */
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_rs_get_method_data(device_handle, name, &buffer);
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- /* Buffer now contains the resource list */
-
- resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
- resource_end =
- ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
-
- /* Walk the resource list until the end_tag is found (or buffer end) */
-
- while (resource < resource_end) {
- /* Sanity check the resource */
-
- if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
- status = AE_AML_INVALID_RESOURCE_TYPE;
- break;
- }
-
- /* Invoke the user function, abort on any error returned */
-
- status = user_function(resource, context);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_TERMINATE) {
- /* This is an OK termination by the user function */
-
- status = AE_OK;
- }
- break;
- }
-
- /* end_tag indicates end-of-list */
-
- if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
- break;
- }
-
- /* Get the next resource descriptor */
-
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
- }
-
- ACPI_MEM_FREE(buffer.pointer);
+ status = acpi_rs_get_prs_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_walk_resources);
+ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
+#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_set_current_resources
*
- * PARAMETERS: device_handle - a handle to the device object for the
- * device we are changing the resources of
- * in_buffer - a pointer to a buffer containing the
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are setting resources
+ * in_buffer - Pointer to a buffer containing the
* resources to be set for the device
*
* RETURN: Status
*
* DESCRIPTION: This function is called to set the current resources for a
- * specific device. The caller must first acquire a handle for
- * the desired device. The resource data is passed to the routine
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is passed to the routine
* the buffer pointed to by the in_buffer variable.
*
******************************************************************************/
-
acpi_status
acpi_set_current_resources(acpi_handle device_handle,
struct acpi_buffer *in_buffer)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_set_current_resources");
+ ACPI_FUNCTION_TRACE(acpi_set_current_resources);
- /* Must have a valid handle and buffer */
+ /* Validate the buffer, don't allow zero length */
- if ((!device_handle) ||
- (!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
+ if ((!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- status = acpi_rs_set_srs_method_data(device_handle, in_buffer);
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, in_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_set_srs_method_data(node, in_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_set_current_resources);
+ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
/******************************************************************************
*
* FUNCTION: acpi_resource_to_address64
*
- * PARAMETERS: Resource - Pointer to a resource
- * Out - Pointer to the users's return
- * buffer (a struct
- * struct acpi_resource_address64)
+ * PARAMETERS: Resource - Pointer to a resource
+ * Out - Pointer to the user's return buffer
+ * (a struct acpi_resource_address64)
*
* RETURN: Status
*
* DESCRIPTION: If the resource is an address16, address32, or address64,
- * copy it to the address64 return buffer. This saves the
+ * copy it to the address64 return buffer. This saves the
* caller from having to duplicate code for different-sized
* addresses.
*
******************************************************************************/
-
acpi_status
acpi_resource_to_address64(struct acpi_resource *resource,
struct acpi_resource_address64 *out)
@@ -415,18 +364,18 @@ acpi_resource_to_address64(struct acpi_resource *resource,
return (AE_OK);
}
-EXPORT_SYMBOL(acpi_resource_to_address64);
+ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
/*******************************************************************************
*
* FUNCTION: acpi_get_vendor_resource
*
- * PARAMETERS: device_handle - Handle for the parent device object
- * Name - Method name for the parent resource
- * (METHOD_NAME__CRS or METHOD_NAME__PRS)
- * Uuid - Pointer to the UUID to be matched.
- * includes both subtype and 16-byte UUID
- * ret_buffer - Where the vendor resource is returned
+ * PARAMETERS: device_handle - Handle for the parent device object
+ * Name - Method name for the parent resource
+ * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * Uuid - Pointer to the UUID to be matched.
+ * includes both subtype and 16-byte UUID
+ * ret_buffer - Where the vendor resource is returned
*
* RETURN: Status
*
@@ -435,7 +384,6 @@ EXPORT_SYMBOL(acpi_resource_to_address64);
* UUID subtype. Returns a struct acpi_resource of type Vendor.
*
******************************************************************************/
-
acpi_status
acpi_get_vendor_resource(acpi_handle device_handle,
char *name,
@@ -467,18 +415,19 @@ acpi_get_vendor_resource(acpi_handle device_handle,
return (info.status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_vendor_resource)
+
/*******************************************************************************
*
* FUNCTION: acpi_rs_match_vendor_resource
*
- * PARAMETERS: ACPI_WALK_RESOURCE_CALLBACK
+ * PARAMETERS: acpi_walk_resource_callback
*
* RETURN: Status
*
* DESCRIPTION: Match a vendor resource via the ACPI 3.0 UUID
*
******************************************************************************/
-
static acpi_status
acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
{
@@ -526,3 +475,101 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
info->status = AE_OK;
return (AE_CTRL_TERMINATE);
}
+
+ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_walk_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are querying
+ * Name - Method name of the resources we want
+ * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * user_function - Called for each resource
+ * Context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieves the current or possible resource list for the
+ * specified device. The user_function is called once for
+ * each resource in the list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_walk_resources(acpi_handle device_handle,
+ char *name,
+ acpi_walk_resource_callback user_function, void *context)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+ struct acpi_resource *resource;
+ struct acpi_resource *resource_end;
+
+ ACPI_FUNCTION_TRACE(acpi_walk_resources);
+
+ /* Parameter validation */
+
+ if (!device_handle || !user_function || !name ||
+ (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the _CRS or _PRS resource list */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_rs_get_method_data(device_handle, name, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Buffer now contains the resource list */
+
+ resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
+ resource_end =
+ ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
+
+ /* Walk the resource list until the end_tag is found (or buffer end) */
+
+ while (resource < resource_end) {
+
+ /* Sanity check the resource */
+
+ if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+ status = AE_AML_INVALID_RESOURCE_TYPE;
+ break;
+ }
+
+ /* Invoke the user function, abort on any error returned */
+
+ status = user_function(resource, context);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_TERMINATE) {
+
+ /* This is an OK termination by the user function */
+
+ status = AE_OK;
+ }
+ break;
+ }
+
+ /* end_tag indicates end-of-list */
+
+ if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+ break;
+ }
+
+ /* Get the next resource descriptor */
+
+ resource =
+ ACPI_ADD_PTR(struct acpi_resource, resource,
+ resource->length);
+ }
+
+ ACPI_FREE(buffer.pointer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_resources)
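
The walk routine re-added at the end of rsxface.c iterates the returned resource list and lets the user callback stop the walk cleanly by returning a dedicated "terminate" status. A caller-side sketch of that pattern using simplified stand-in types (not the ACPICA definitions):

/* Resource-walk usage sketch (illustrative only). */
#include <stdio.h>

enum { OK = 0, CTRL_TERMINATE = 1, END_TAG = 99 };

struct resource { int type; int data; };

typedef int (*resource_cb)(const struct resource *res, void *context);

static int walk_resources(const struct resource *list, resource_cb fn, void *ctx)
{
    for (; ; list++) {
        int rc = fn(list, ctx);
        if (rc)
            return rc == CTRL_TERMINATE ? OK : rc;  /* early stop is not an error */
        if (list->type == END_TAG)
            return OK;                              /* normal end of template */
    }
}

/* User callback: count resources, stop early once three have been seen. */
static int count_cb(const struct resource *res, void *context)
{
    int *count = context;
    if (res->type == END_TAG)
        return OK;
    if (++(*count) >= 3)
        return CTRL_TERMINATE;
    return OK;
}

int main(void)
{
    struct resource list[] = {
        {1, 0}, {2, 0}, {3, 0}, {4, 0}, {END_TAG, 0},
    };
    int count = 0;

    walk_resources(list, count_cb, &count);
    printf("visited %d resources\n", count);
    return 0;
}
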
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a0ab828b2cc5..f8316a05ede7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -142,7 +142,7 @@ static void acpi_device_register(struct acpi_device *device,
create_sysfs_device_files(device);
}
-static int acpi_device_unregister(struct acpi_device *device, int type)
+static void acpi_device_unregister(struct acpi_device *device, int type)
{
spin_lock(&acpi_device_lock);
if (device->parent) {
@@ -158,7 +158,6 @@ static int acpi_device_unregister(struct acpi_device *device, int type)
acpi_detach_data(device->handle, acpi_bus_data_handler);
remove_sysfs_device_files(device);
kobject_unregister(&device->kobj);
- return 0;
}
void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
@@ -234,12 +233,9 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
int acpi_match_ids(struct acpi_device *device, char *ids)
{
- int error = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
if (device->flags.hardware_id)
if (strstr(ids, device->pnp.hardware_id))
- goto Done;
+ return 0;
if (device->flags.compatible_ids) {
struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
@@ -248,15 +244,10 @@ int acpi_match_ids(struct acpi_device *device, char *ids)
/* compare multiple _CID entries against driver ids */
for (i = 0; i < cid_list->count; i++) {
if (strstr(ids, cid_list->id[i].value))
- goto Done;
+ return 0;
}
}
- error = -ENOENT;
-
- Done:
- if (buffer.pointer)
- acpi_os_free(buffer.pointer);
- return error;
+ return -ENOENT;
}
static acpi_status
@@ -441,10 +432,7 @@ acpi_eject_store(struct acpi_device *device, const char *buf, size_t count)
islockable = device->flags.lockable;
handle = device->handle;
- if (type == ACPI_TYPE_PROCESSOR)
- result = acpi_bus_trim(device, 0);
- else
- result = acpi_bus_trim(device, 1);
+ result = acpi_bus_trim(device, 1);
if (!result)
result = acpi_eject_operation(handle, islockable);
@@ -471,7 +459,6 @@ static int acpi_bus_get_perf_flags(struct acpi_device *device)
-------------------------------------------------------------------------- */
static LIST_HEAD(acpi_bus_drivers);
-static DECLARE_MUTEX(acpi_bus_drivers_lock);
/**
* acpi_bus_match - match device IDs to driver's supported IDs
@@ -548,10 +535,9 @@ static int acpi_start_single_object(struct acpi_device *device)
return_VALUE(result);
}
-static int acpi_driver_attach(struct acpi_driver *drv)
+static void acpi_driver_attach(struct acpi_driver *drv)
{
struct list_head *node, *next;
- int count = 0;
ACPI_FUNCTION_TRACE("acpi_driver_attach");
@@ -568,7 +554,6 @@ static int acpi_driver_attach(struct acpi_driver *drv)
if (!acpi_bus_driver_init(dev, drv)) {
acpi_start_single_object(dev);
atomic_inc(&drv->references);
- count++;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Found driver [%s] for device [%s]\n",
drv->name, dev->pnp.bus_id));
@@ -577,10 +562,9 @@ static int acpi_driver_attach(struct acpi_driver *drv)
spin_lock(&acpi_device_lock);
}
spin_unlock(&acpi_device_lock);
- return_VALUE(count);
}
-static int acpi_driver_detach(struct acpi_driver *drv)
+static void acpi_driver_detach(struct acpi_driver *drv)
{
struct list_head *node, *next;
@@ -602,7 +586,6 @@ static int acpi_driver_detach(struct acpi_driver *drv)
}
}
spin_unlock(&acpi_device_lock);
- return_VALUE(0);
}
/**
@@ -610,28 +593,22 @@ static int acpi_driver_detach(struct acpi_driver *drv)
* @driver: driver being registered
*
* Registers a driver with the ACPI bus. Searches the namespace for all
- * devices that match the driver's criteria and binds. Returns the
- * number of devices that were claimed by the driver, or a negative
- * error status for failure.
+ * devices that match the driver's criteria and binds. Returns zero for
+ * success or a negative error status for failure.
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
- int count;
-
ACPI_FUNCTION_TRACE("acpi_bus_register_driver");
if (acpi_disabled)
return_VALUE(-ENODEV);
- if (!driver)
- return_VALUE(-EINVAL);
-
spin_lock(&acpi_device_lock);
list_add_tail(&driver->node, &acpi_bus_drivers);
spin_unlock(&acpi_device_lock);
- count = acpi_driver_attach(driver);
+ acpi_driver_attach(driver);
- return_VALUE(count);
+ return_VALUE(0);
}
EXPORT_SYMBOL(acpi_bus_register_driver);
@@ -643,23 +620,16 @@ EXPORT_SYMBOL(acpi_bus_register_driver);
* Unregisters a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and unbinds.
*/
-int acpi_bus_unregister_driver(struct acpi_driver *driver)
+void acpi_bus_unregister_driver(struct acpi_driver *driver)
{
- int error = 0;
+ acpi_driver_detach(driver);
- ACPI_FUNCTION_TRACE("acpi_bus_unregister_driver");
-
- if (driver) {
- acpi_driver_detach(driver);
-
- if (!atomic_read(&driver->references)) {
- spin_lock(&acpi_device_lock);
- list_del_init(&driver->node);
- spin_unlock(&acpi_device_lock);
- }
- } else
- error = -EINVAL;
- return_VALUE(error);
+ if (!atomic_read(&driver->references)) {
+ spin_lock(&acpi_device_lock);
+ list_del_init(&driver->node);
+ spin_unlock(&acpi_device_lock);
+ }
+ return;
}
EXPORT_SYMBOL(acpi_bus_unregister_driver);
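With this change acpi_bus_register_driver() reports plain 0/-errno instead of a claimed-device count, and acpi_bus_unregister_driver() returns nothing, so callers have nothing meaningful to check on unregister. Below is a minimal, untested module sketch against a 2.6-era tree showing the new calling convention; the driver name, the "FOO0001" hardware ID and the callbacks are invented for illustration.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

/* Hypothetical callbacks; "FOO0001" is a made-up hardware ID. */
static int foo_add(struct acpi_device *device)
{
        printk(KERN_INFO "foo: bound to %s\n", acpi_device_bid(device));
        return 0;
}

static int foo_remove(struct acpi_device *device, int type)
{
        return 0;
}

static struct acpi_driver foo_driver = {
        .name  = "foo",
        .class = "foo",
        .ids   = "FOO0001",
        .ops   = {
                .add    = foo_add,
                .remove = foo_remove,
        },
};

static int __init foo_init(void)
{
        /* With this patch the call reports 0/-errno, not a device count. */
        return acpi_bus_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
        /* Now returns void, so there is nothing left to check here. */
        acpi_bus_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");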
@@ -1371,6 +1341,100 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
return_VALUE(result);
}
+
+static inline struct acpi_device * to_acpi_dev(struct device * dev)
+{
+ return container_of(dev, struct acpi_device, dev);
+}
+
+
+static int root_suspend(struct acpi_device * acpi_dev, pm_message_t state)
+{
+ struct acpi_device * dev, * next;
+ int result;
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_entry_safe_reverse(dev, next, &acpi_device_list, g_list) {
+ if (dev->driver && dev->driver->ops.suspend) {
+ spin_unlock(&acpi_device_lock);
+ result = dev->driver->ops.suspend(dev, 0);
+ if (result) {
+ printk(KERN_ERR PREFIX "[%s - %s] Suspend failed: %d\n",
+ acpi_device_name(dev),
+ acpi_device_bid(dev), result);
+ }
+ spin_lock(&acpi_device_lock);
+ }
+ }
+ spin_unlock(&acpi_device_lock);
+ return 0;
+}
+
+
+static int acpi_device_suspend(struct device * dev, pm_message_t state)
+{
+ struct acpi_device * acpi_dev = to_acpi_dev(dev);
+
+ /*
+ * For now, we should only register 1 generic device -
+ * the ACPI root device - and from there, we walk the
+ * tree of ACPI devices to suspend each one using the
+ * ACPI driver methods.
+ */
+ if (acpi_dev->handle == ACPI_ROOT_OBJECT)
+ root_suspend(acpi_dev, state);
+ return 0;
+}
+
+
+
+static int root_resume(struct acpi_device * acpi_dev)
+{
+ struct acpi_device * dev, * next;
+ int result;
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_entry_safe(dev, next, &acpi_device_list, g_list) {
+ if (dev->driver && dev->driver->ops.resume) {
+ spin_unlock(&acpi_device_lock);
+ result = dev->driver->ops.resume(dev, 0);
+ if (result) {
+ printk(KERN_ERR PREFIX "[%s - %s] resume failed: %d\n",
+ acpi_device_name(dev),
+ acpi_device_bid(dev), result);
+ }
+ spin_lock(&acpi_device_lock);
+ }
+ }
+ spin_unlock(&acpi_device_lock);
+ return 0;
+}
+
+
+static int acpi_device_resume(struct device * dev)
+{
+ struct acpi_device * acpi_dev = to_acpi_dev(dev);
+
+ /*
+ * For now, we should only register 1 generic device -
+ * the ACPI root device - and from there, we walk the
+ * tree of ACPI devices to resume each one using the
+ * ACPI driver methods.
+ */
+ if (acpi_dev->handle == ACPI_ROOT_OBJECT)
+ root_resume(acpi_dev);
+ return 0;
+}
+
+
+struct bus_type acpi_bus_type = {
+ .name = "acpi",
+ .suspend = acpi_device_suspend,
+ .resume = acpi_device_resume,
+};
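The walk in root_suspend() and root_resume() above encodes the usual power-management ordering rule: suspend children before their parents (reverse list order) and resume parents before their children (forward order), dropping the device-list lock around each driver callback. The ordering itself is easy to see in a tiny user-space sketch (an array instead of a kernel list, made-up device names):

#include <stdio.h>

struct dev { const char *name; };

static struct dev devices[] = {
        { "root" }, { "bridge" }, { "leaf" },
};
#define NDEV ((int)(sizeof(devices) / sizeof(devices[0])))

static void suspend_all(void)
{
        /* Children (later entries) go down before their parents. */
        for (int i = NDEV - 1; i >= 0; i--)
                printf("suspend %s\n", devices[i].name);
}

static void resume_all(void)
{
        /* Parents come back first so children can rely on them. */
        for (int i = 0; i < NDEV; i++)
                printf("resume %s\n", devices[i].name);
}

int main(void)
{
        suspend_all();
        resume_all();
        return 0;
}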
+
+
+
static int __init acpi_scan_init(void)
{
int result;
@@ -1383,6 +1447,12 @@ static int __init acpi_scan_init(void)
kset_register(&acpi_namespace_kset);
+ result = bus_register(&acpi_bus_type);
+ if (result) {
+ /* We don't want to quit even if we failed to add suspend/resume */
+ printk(KERN_ERR PREFIX "Could not register bus type\n");
+ }
+
/*
* Create the root device in the bus's device tree
*/
@@ -1392,6 +1462,16 @@ static int __init acpi_scan_init(void)
goto Done;
result = acpi_start_single_object(acpi_root);
+ if (result)
+ goto Done;
+
+ acpi_root->dev.bus = &acpi_bus_type;
+ snprintf(acpi_root->dev.bus_id, BUS_ID_SIZE, "%s", acpi_bus_type.name);
+ result = device_register(&acpi_root->dev);
+ if (result) {
+ /* We don't want to quit even if we failed to add suspend/resume */
+ printk(KERN_ERR PREFIX "Could not register device\n");
+ }
/*
* Enumerate devices in the ACPI namespace.
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 930427fc0c4b..62ce87d71651 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -105,6 +105,14 @@ static int acpi_pm_enter(suspend_state_t pm_state)
default:
return -EINVAL;
}
+
+ /* ACPI 3.0 spec (P62) says that it's the responsibility
+ * of the OSPM to clear the status bit [ implying that the
+ * POWER_BUTTON event should not reach userspace ]
+ */
+ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
+ acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+
local_irq_restore(flags);
printk(KERN_DEBUG "Back to C!\n");
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
index 85df0ceda2a9..af1dbabaf0b1 100644
--- a/drivers/acpi/sleep/wakeup.c
+++ b/drivers/acpi/sleep/wakeup.c
@@ -155,7 +155,6 @@ static int __init acpi_wakeup_device_init(void)
if (acpi_disabled)
return 0;
- printk("ACPI wakeup devices: \n");
spin_lock(&acpi_device_lock);
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
@@ -174,10 +173,8 @@ static int __init acpi_wakeup_device_init(void)
dev->wakeup.state.enabled = 1;
spin_lock(&acpi_device_lock);
}
- printk("%4s ", dev->pnp.bus_id);
}
spin_unlock(&acpi_device_lock);
- printk("\n");
return 0;
}
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index e4308c7a6743..a934ac42178d 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -39,7 +39,7 @@ ACPI_MODULE_NAME("acpi_system")
#define ACPI_SYSTEM_FILE_EVENT "event"
#define ACPI_SYSTEM_FILE_DSDT "dsdt"
#define ACPI_SYSTEM_FILE_FADT "fadt"
-extern FADT_DESCRIPTOR acpi_fadt;
+extern struct fadt_descriptor acpi_fadt;
/* --------------------------------------------------------------------------
FS Interface (/proc)
@@ -82,7 +82,7 @@ acpi_system_read_dsdt(struct file *file,
ACPI_FUNCTION_TRACE("acpi_system_read_dsdt");
- status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt);
+ status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
@@ -110,7 +110,7 @@ acpi_system_read_fadt(struct file *file,
ACPI_FUNCTION_TRACE("acpi_system_read_fadt");
- status = acpi_get_table(ACPI_TABLE_FADT, 1, &fadt);
+ status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &fadt);
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 7f37c7cc5ef1..ed5e8816d83d 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -282,8 +282,8 @@ acpi_get_table_header_early(enum acpi_table_id id,
/* Map the DSDT header via the pointer in the FADT */
if (id == ACPI_DSDT) {
- struct fadt_descriptor_rev2 *fadt =
- (struct fadt_descriptor_rev2 *)*header;
+ struct fadt_descriptor *fadt =
+ (struct fadt_descriptor *)*header;
if (fadt->revision == 3 && fadt->Xdsdt) {
*header = (void *)__acpi_map_table(fadt->Xdsdt,
diff --git a/drivers/acpi/tables/tbconvrt.c b/drivers/acpi/tables/tbconvrt.c
index 03b37d2223bc..d697fcb35d52 100644
--- a/drivers/acpi/tables/tbconvrt.c
+++ b/drivers/acpi/tables/tbconvrt.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/actables.h>
@@ -56,15 +54,15 @@ acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
acpi_physical_address address);
static void
-acpi_tb_convert_fadt1(struct fadt_descriptor_rev2 *local_fadt,
+acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
struct fadt_descriptor_rev1 *original_fadt);
static void
-acpi_tb_convert_fadt2(struct fadt_descriptor_rev2 *local_fadt,
- struct fadt_descriptor_rev2 *original_fadt);
+acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
+ struct fadt_descriptor *original_fadt);
u8 acpi_fadt_is_v1;
-EXPORT_SYMBOL(acpi_fadt_is_v1);
+ACPI_EXPORT_SYMBOL(acpi_fadt_is_v1)
/*******************************************************************************
*
@@ -122,7 +120,7 @@ acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
{
acpi_size table_size;
u32 i;
- XSDT_DESCRIPTOR *new_table;
+ struct xsdt_descriptor *new_table;
ACPI_FUNCTION_ENTRY();
@@ -133,7 +131,7 @@ acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
/* Allocate an XSDT */
- new_table = ACPI_MEM_CALLOCATE(table_size);
+ new_table = ACPI_ALLOCATE_ZEROED(table_size);
if (!new_table) {
return (AE_NO_MEMORY);
}
@@ -147,17 +145,18 @@ acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
/* Copy the table pointers */
for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
+
/* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
ACPI_STORE_ADDRESS(new_table->table_offset_entry[i],
(ACPI_CAST_PTR
- (struct rsdt_descriptor_rev1,
+ (struct rsdt_descriptor,
table_info->pointer))->
table_offset_entry[i]);
} else {
new_table->table_offset_entry[i] =
- (ACPI_CAST_PTR(XSDT_DESCRIPTOR,
+ (ACPI_CAST_PTR(struct xsdt_descriptor,
table_info->pointer))->
table_offset_entry[i];
}
@@ -219,7 +218,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
******************************************************************************/
static void
-acpi_tb_convert_fadt1(struct fadt_descriptor_rev2 *local_fadt,
+acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
struct fadt_descriptor_rev1 *original_fadt)
{
@@ -365,14 +364,13 @@ acpi_tb_convert_fadt1(struct fadt_descriptor_rev2 *local_fadt,
******************************************************************************/
static void
-acpi_tb_convert_fadt2(struct fadt_descriptor_rev2 *local_fadt,
- struct fadt_descriptor_rev2 *original_fadt)
+acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
+ struct fadt_descriptor *original_fadt)
{
/* We have an ACPI 2.0 FADT but we must copy it to our local buffer */
- ACPI_MEMCPY(local_fadt, original_fadt,
- sizeof(struct fadt_descriptor_rev2));
+ ACPI_MEMCPY(local_fadt, original_fadt, sizeof(struct fadt_descriptor));
/*
* "X" fields are optional extensions to the original V1.0 fields, so
@@ -491,10 +489,10 @@ acpi_tb_convert_fadt2(struct fadt_descriptor_rev2 *local_fadt,
acpi_status acpi_tb_convert_table_fadt(void)
{
- struct fadt_descriptor_rev2 *local_fadt;
+ struct fadt_descriptor *local_fadt;
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("tb_convert_table_fadt");
+ ACPI_FUNCTION_TRACE(tb_convert_table_fadt);
/*
* acpi_gbl_FADT is valid. Validate the FADT length. The table must be
@@ -508,13 +506,14 @@ acpi_status acpi_tb_convert_table_fadt(void)
/* Allocate buffer for the ACPI 2.0(+) FADT */
- local_fadt = ACPI_MEM_CALLOCATE(sizeof(struct fadt_descriptor_rev2));
+ local_fadt = ACPI_ALLOCATE_ZEROED(sizeof(struct fadt_descriptor));
if (!local_fadt) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
if (acpi_gbl_FADT->revision >= FADT2_REVISION_ID) {
- if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor_rev2)) {
+ if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor)) {
+
/* Length is too short to be a V2.0 table */
ACPI_WARNING((AE_INFO,
@@ -538,11 +537,11 @@ acpi_status acpi_tb_convert_table_fadt(void)
/* Global FADT pointer will point to the new common V2.0 FADT */
acpi_gbl_FADT = local_fadt;
- acpi_gbl_FADT->length = sizeof(FADT_DESCRIPTOR);
+ acpi_gbl_FADT->length = sizeof(struct fadt_descriptor);
/* Free the original table */
- table_desc = acpi_gbl_table_lists[ACPI_TABLE_FADT].next;
+ table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_FADT].next;
acpi_tb_delete_single_table(table_desc);
/* Install the new table */
@@ -550,7 +549,7 @@ acpi_status acpi_tb_convert_table_fadt(void)
table_desc->pointer =
ACPI_CAST_PTR(struct acpi_table_header, acpi_gbl_FADT);
table_desc->allocation = ACPI_MEM_ALLOCATED;
- table_desc->length = sizeof(struct fadt_descriptor_rev2);
+ table_desc->length = sizeof(struct fadt_descriptor);
/* Dump the entire FADT */
@@ -580,7 +579,7 @@ acpi_status acpi_tb_convert_table_fadt(void)
acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info)
{
- ACPI_FUNCTION_TRACE("tb_build_common_facs");
+ ACPI_FUNCTION_TRACE(tb_build_common_facs);
/* Absolute minimum length is 24, but the ACPI spec says 64 */
@@ -603,6 +602,7 @@ acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info)
if ((acpi_gbl_RSDP->revision < 2) ||
(acpi_gbl_FACS->length < 32) ||
(!(acpi_gbl_FACS->xfirmware_waking_vector))) {
+
/* ACPI 1.0 FACS or short table or optional X_ field is zero */
acpi_gbl_common_fACS.firmware_waking_vector = ACPI_CAST_PTR(u64,
diff --git a/drivers/acpi/tables/tbget.c b/drivers/acpi/tables/tbget.c
index 09b4ee6dfd60..99eacceff563 100644
--- a/drivers/acpi/tables/tbget.c
+++ b/drivers/acpi/tables/tbget.c
@@ -78,7 +78,7 @@ acpi_tb_get_table(struct acpi_pointer *address,
acpi_status status;
struct acpi_table_header header;
- ACPI_FUNCTION_TRACE("tb_get_table");
+ ACPI_FUNCTION_TRACE(tb_get_table);
/* Get the header in order to get signature and table size */
@@ -124,7 +124,7 @@ acpi_tb_get_table_header(struct acpi_pointer *address,
acpi_status status = AE_OK;
struct acpi_table_header *header = NULL;
- ACPI_FUNCTION_TRACE("tb_get_table_header");
+ ACPI_FUNCTION_TRACE(tb_get_table_header);
/*
* Flags contains the current processor mode (Virtual or Physical
@@ -148,6 +148,10 @@ acpi_tb_get_table_header(struct acpi_pointer *address,
sizeof(struct acpi_table_header),
(void *)&header);
if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory at %8.8X%8.8X for table header",
+ ACPI_FORMAT_UINT64(address->pointer.
+ physical)));
return_ACPI_STATUS(status);
}
@@ -198,7 +202,7 @@ acpi_tb_get_table_body(struct acpi_pointer *address,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_get_table_body");
+ ACPI_FUNCTION_TRACE(tb_get_table_body);
if (!table_info || !address) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -208,6 +212,7 @@ acpi_tb_get_table_body(struct acpi_pointer *address,
status = acpi_tb_table_override(header, table_info);
if (ACPI_SUCCESS(status)) {
+
/* Table was overridden by the host OS */
return_ACPI_STATUS(status);
@@ -241,7 +246,7 @@ acpi_tb_table_override(struct acpi_table_header *header,
acpi_status status;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("tb_table_override");
+ ACPI_FUNCTION_TRACE(tb_table_override);
/*
* The OSL will examine the header and decide whether to override this
@@ -250,6 +255,7 @@ acpi_tb_table_override(struct acpi_table_header *header,
*/
status = acpi_os_table_override(header, &new_table);
if (ACPI_FAILURE(status)) {
+
/* Some severe error from the OSL, but we basically ignore it */
ACPI_EXCEPTION((AE_INFO, status,
@@ -258,6 +264,7 @@ acpi_tb_table_override(struct acpi_table_header *header,
}
if (!new_table) {
+
/* No table override */
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
@@ -311,7 +318,7 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
u8 allocation;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("tb_get_this_table");
+ ACPI_FUNCTION_TRACE(tb_get_this_table);
/*
* Flags contains the current processor mode (Virtual or Physical
@@ -323,7 +330,7 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
/* Pointer matches processor mode, copy the table to a new buffer */
- full_table = ACPI_MEM_ALLOCATE(header->length);
+ full_table = ACPI_ALLOCATE(header->length);
if (!full_table) {
ACPI_ERROR((AE_INFO,
"Could not allocate table memory for [%4.4s] length %X",
@@ -376,11 +383,12 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
* Validate checksum for _most_ tables,
* even the ones whose signature we don't recognize
*/
- if (table_info->type != ACPI_TABLE_FACS) {
+ if (table_info->type != ACPI_TABLE_ID_FACS) {
status = acpi_tb_verify_table_checksum(full_table);
#if (!ACPI_CHECKSUM_ABORT)
if (ACPI_FAILURE(status)) {
+
/* Ignore the error if configuration says so */
status = AE_OK;
@@ -409,7 +417,7 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
*
* PARAMETERS: table_type - one of the defined table types
* Instance - Which table of this type
- * table_ptr_loc - pointer to location to place the pointer for
+ * return_table - pointer to location to place the pointer for
* return
*
* RETURN: Status
@@ -420,57 +428,34 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
acpi_status
acpi_tb_get_table_ptr(acpi_table_type table_type,
- u32 instance, struct acpi_table_header **table_ptr_loc)
+ u32 instance, struct acpi_table_header **return_table)
{
struct acpi_table_desc *table_desc;
u32 i;
- ACPI_FUNCTION_TRACE("tb_get_table_ptr");
-
- if (!acpi_gbl_DSDT) {
- return_ACPI_STATUS(AE_NO_ACPI_TABLES);
- }
+ ACPI_FUNCTION_TRACE(tb_get_table_ptr);
- if (table_type > ACPI_TABLE_MAX) {
+ if (table_type > ACPI_TABLE_ID_MAX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /*
- * For all table types (Single/Multiple), the first
- * instance is always in the list head.
- */
- if (instance == 1) {
- /* Get the first */
-
- *table_ptr_loc = NULL;
- if (acpi_gbl_table_lists[table_type].next) {
- *table_ptr_loc =
- acpi_gbl_table_lists[table_type].next->pointer;
- }
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Check for instance out of range */
+ /* Check for instance out of range of the current table count */
if (instance > acpi_gbl_table_lists[table_type].count) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* Walk the list to get the desired table
- * Since the if (Instance == 1) check above checked for the
- * first table, setting table_desc equal to the .Next member
- * is actually pointing to the second table. Therefore, we
- * need to walk from the 2nd table until we reach the Instance
- * that the user is looking for and return its table pointer.
+ /*
+ * Walk the list to get the desired table
+ * Note: Instance is one-based
*/
table_desc = acpi_gbl_table_lists[table_type].next;
- for (i = 2; i < instance; i++) {
+ for (i = 1; i < instance; i++) {
table_desc = table_desc->next;
}
/* We are now pointing to the requested table's descriptor */
- *table_ptr_loc = table_desc->pointer;
-
+ *return_table = table_desc->pointer;
return_ACPI_STATUS(AE_OK);
}
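The rewritten loop above relies on Instance being one-based: starting at the list head and advancing (instance - 1) times lands on the requested descriptor, which also covers the instance == 1 case that the deleted code special-cased. A stand-alone sketch with a hypothetical three-entry descriptor list:

#include <stdio.h>

struct desc {
        int value;
        struct desc *next;
};

/* Return the Nth descriptor (one-based) or NULL if out of range. */
static struct desc *get_instance(struct desc *head, unsigned int count,
                                 unsigned int instance)
{
        struct desc *d = head;

        if (instance == 0 || instance > count)
                return NULL;

        for (unsigned int i = 1; i < instance; i++)
                d = d->next;

        return d;
}

int main(void)
{
        struct desc c = { 3, NULL };
        struct desc b = { 2, &c };
        struct desc a = { 1, &b };

        struct desc *d = get_instance(&a, 3, 2);
        printf("instance 2 holds %d\n", d ? d->value : -1);
        return 0;
}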
diff --git a/drivers/acpi/tables/tbgetall.c b/drivers/acpi/tables/tbgetall.c
index 134e5dce0bc1..ad982112e4c6 100644
--- a/drivers/acpi/tables/tbgetall.c
+++ b/drivers/acpi/tables/tbgetall.c
@@ -77,7 +77,7 @@ acpi_tb_get_primary_table(struct acpi_pointer *address,
acpi_status status;
struct acpi_table_header header;
- ACPI_FUNCTION_TRACE("tb_get_primary_table");
+ ACPI_FUNCTION_TRACE(tb_get_primary_table);
/* Ignore a NULL address in the RSDT */
@@ -140,7 +140,7 @@ acpi_tb_get_secondary_table(struct acpi_pointer *address,
acpi_status status;
struct acpi_table_header header;
- ACPI_FUNCTION_TRACE_STR("tb_get_secondary_table", signature);
+ ACPI_FUNCTION_TRACE_STR(tb_get_secondary_table, signature);
/* Get the header in order to match the signature */
@@ -151,7 +151,7 @@ acpi_tb_get_secondary_table(struct acpi_pointer *address,
/* Signature must match request */
- if (ACPI_STRNCMP(header.signature, signature, ACPI_NAME_SIZE)) {
+ if (!ACPI_COMPARE_NAME(header.signature, signature)) {
ACPI_ERROR((AE_INFO,
"Incorrect table signature - wanted [%s] found [%4.4s]",
signature, header.signature));
@@ -207,7 +207,7 @@ acpi_status acpi_tb_get_required_tables(void)
struct acpi_table_desc table_info;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("tb_get_required_tables");
+ ACPI_FUNCTION_TRACE(tb_get_required_tables);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%d ACPI tables in RSDT\n",
acpi_gbl_rsdt_table_count));
@@ -223,6 +223,7 @@ acpi_status acpi_tb_get_required_tables(void)
* any SSDTs.
*/
for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
+
/* Get the table address from the common internal XSDT */
address.pointer.value = acpi_gbl_XSDT->table_offset_entry[i];
@@ -305,6 +306,6 @@ acpi_status acpi_tb_get_required_tables(void)
/* Always delete the RSDP mapping, we are done with it */
- acpi_tb_delete_tables_by_type(ACPI_TABLE_RSDP);
+ acpi_tb_delete_tables_by_type(ACPI_TABLE_ID_RSDP);
return_ACPI_STATUS(status);
}
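Several hunks in this file (and in tbxfroot.c further down) swap ACPI_STRNCMP(..., ACPI_NAME_SIZE) for ACPI_COMPARE_NAME. Both compare exactly four bytes; the macro simply makes the fixed-width, non-NUL-terminated nature of ACPI signatures explicit, and ACPICA typically implements it as a single 32-bit compare. A stand-alone illustration with a hypothetical compare_name() stand-in:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* ACPI table signatures are exactly four bytes, not NUL-terminated. */
#define NAME_SIZE 4

/* Hypothetical stand-in for ACPI_COMPARE_NAME: nonzero when the names match. */
static int compare_name(const char *a, const char *b)
{
        uint32_t x, y;

        memcpy(&x, a, NAME_SIZE);   /* avoids unaligned and aliasing issues */
        memcpy(&y, b, NAME_SIZE);
        return x == y;
}

int main(void)
{
        const char header_sig[NAME_SIZE] = { 'X', 'S', 'D', 'T' };

        printf("XSDT? %d\n", compare_name(header_sig, "XSDT"));
        printf("RSDT? %d\n", compare_name(header_sig, "RSDT"));
        return 0;
}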
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 7ffd0fddb4e5..7ca2df75bb11 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -73,17 +73,18 @@ acpi_tb_match_signature(char *signature,
{
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("tb_match_signature");
+ ACPI_FUNCTION_TRACE(tb_match_signature);
/* Search for a signature match among the known table types */
- for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) {
+ for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
if (!(acpi_gbl_table_data[i].flags & search_type)) {
continue;
}
if (!ACPI_STRNCMP(signature, acpi_gbl_table_data[i].signature,
acpi_gbl_table_data[i].sig_length)) {
+
/* Found a signature match, return index if requested */
if (table_info) {
@@ -122,7 +123,7 @@ acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_install_table");
+ ACPI_FUNCTION_TRACE(tb_install_table);
/* Lock tables while installing */
@@ -187,7 +188,7 @@ acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type)
struct acpi_table_header *table_header;
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_recognize_table");
+ ACPI_FUNCTION_TRACE(tb_recognize_table);
/* Ensure that we have a valid table pointer */
@@ -218,7 +219,6 @@ acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type)
/* Return the table type and length via the info struct */
table_info->length = (acpi_size) table_header->length;
-
return_ACPI_STATUS(status);
}
@@ -243,11 +243,11 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
struct acpi_table_desc *table_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE_U32("tb_init_table_descriptor", table_type);
+ ACPI_FUNCTION_TRACE_U32(tb_init_table_descriptor, table_type);
/* Allocate a descriptor for this table */
- table_desc = ACPI_MEM_CALLOCATE(sizeof(struct acpi_table_desc));
+ table_desc = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
if (!table_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -274,7 +274,7 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
* at this location, so return an error.
*/
if (list_head->next) {
- ACPI_MEM_FREE(table_desc);
+ ACPI_FREE(table_desc);
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}
@@ -312,15 +312,14 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
/* Finish initialization of the table descriptor */
+ table_desc->loaded_into_namespace = FALSE;
table_desc->type = (u8) table_type;
table_desc->pointer = table_info->pointer;
table_desc->length = table_info->length;
table_desc->allocation = table_info->allocation;
table_desc->aml_start = (u8 *) (table_desc->pointer + 1),
- table_desc->aml_length = (u32) (table_desc->length -
- (u32) sizeof(struct
- acpi_table_header));
- table_desc->loaded_into_namespace = FALSE;
+ table_desc->aml_length = (u32)
+ (table_desc->length - (u32) sizeof(struct acpi_table_header));
/*
* Set the appropriate global pointer (if there is one) to point to the
@@ -335,7 +334,6 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
table_info->owner_id = table_desc->owner_id;
table_info->installed_desc = table_desc;
-
return_ACPI_STATUS(AE_OK);
}
@@ -359,7 +357,7 @@ void acpi_tb_delete_all_tables(void)
* Free memory allocated for ACPI tables
* Memory can either be mapped or allocated
*/
- for (type = 0; type < NUM_ACPI_TABLE_TYPES; type++) {
+ for (type = 0; type < (ACPI_TABLE_ID_MAX + 1); type++) {
acpi_tb_delete_tables_by_type(type);
}
}
@@ -383,9 +381,9 @@ void acpi_tb_delete_tables_by_type(acpi_table_type type)
u32 count;
u32 i;
- ACPI_FUNCTION_TRACE_U32("tb_delete_tables_by_type", type);
+ ACPI_FUNCTION_TRACE_U32(tb_delete_tables_by_type, type);
- if (type > ACPI_TABLE_MAX) {
+ if (type > ACPI_TABLE_ID_MAX) {
return_VOID;
}
@@ -396,28 +394,28 @@ void acpi_tb_delete_tables_by_type(acpi_table_type type)
/* Clear the appropriate "typed" global table pointer */
switch (type) {
- case ACPI_TABLE_RSDP:
+ case ACPI_TABLE_ID_RSDP:
acpi_gbl_RSDP = NULL;
break;
- case ACPI_TABLE_DSDT:
+ case ACPI_TABLE_ID_DSDT:
acpi_gbl_DSDT = NULL;
break;
- case ACPI_TABLE_FADT:
+ case ACPI_TABLE_ID_FADT:
acpi_gbl_FADT = NULL;
break;
- case ACPI_TABLE_FACS:
+ case ACPI_TABLE_ID_FACS:
acpi_gbl_FACS = NULL;
break;
- case ACPI_TABLE_XSDT:
+ case ACPI_TABLE_ID_XSDT:
acpi_gbl_XSDT = NULL;
break;
- case ACPI_TABLE_SSDT:
- case ACPI_TABLE_PSDT:
+ case ACPI_TABLE_ID_SSDT:
+ case ACPI_TABLE_ID_PSDT:
default:
break;
}
@@ -471,7 +469,7 @@ void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc)
case ACPI_MEM_ALLOCATED:
- ACPI_MEM_FREE(table_desc->pointer);
+ ACPI_FREE(table_desc->pointer);
break;
case ACPI_MEM_MAPPED:
@@ -503,7 +501,7 @@ struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc
{
struct acpi_table_desc *next_desc;
- ACPI_FUNCTION_TRACE_PTR("tb_uninstall_table", table_desc);
+ ACPI_FUNCTION_TRACE_PTR(tb_uninstall_table, table_desc);
if (!table_desc) {
return_PTR(NULL);
@@ -530,7 +528,7 @@ struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc
/* Free the table descriptor */
next_desc = table_desc->next;
- ACPI_MEM_FREE(table_desc);
+ ACPI_FREE(table_desc);
/* Return pointer to the next descriptor */
diff --git a/drivers/acpi/tables/tbrsdt.c b/drivers/acpi/tables/tbrsdt.c
index 4d308220225d..abcb08c2592a 100644
--- a/drivers/acpi/tables/tbrsdt.c
+++ b/drivers/acpi/tables/tbrsdt.c
@@ -64,7 +64,7 @@ acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
acpi_status status;
struct rsdp_descriptor *rsdp;
- ACPI_FUNCTION_TRACE("tb_verify_rsdp");
+ ACPI_FUNCTION_TRACE(tb_verify_rsdp);
switch (address->pointer_type) {
case ACPI_LOGICAL_POINTER:
@@ -78,7 +78,7 @@ acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
*/
status = acpi_os_map_memory(address->pointer.physical,
sizeof(struct rsdp_descriptor),
- (void *)&rsdp);
+ ACPI_CAST_PTR(void, &rsdp));
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -95,15 +95,20 @@ acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
goto cleanup;
}
- /* The RSDP supplied is OK */
+ /* RSDP is ok. Init the table info */
table_info.pointer = ACPI_CAST_PTR(struct acpi_table_header, rsdp);
table_info.length = sizeof(struct rsdp_descriptor);
- table_info.allocation = ACPI_MEM_MAPPED;
+
+ if (address->pointer_type == ACPI_PHYSICAL_POINTER) {
+ table_info.allocation = ACPI_MEM_MAPPED;
+ } else {
+ table_info.allocation = ACPI_MEM_NOT_ALLOCATED;
+ }
/* Save the table pointers and allocation info */
- status = acpi_tb_init_table_descriptor(ACPI_TABLE_RSDP, &table_info);
+ status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_RSDP, &table_info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -174,22 +179,20 @@ void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address)
acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
{
- int no_match;
+ char *signature;
ACPI_FUNCTION_ENTRY();
- /*
- * Search for appropriate signature, RSDT or XSDT
- */
+ /* Search for appropriate signature, RSDT or XSDT */
+
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
- no_match = ACPI_STRNCMP((char *)table_ptr, RSDT_SIG,
- sizeof(RSDT_SIG) - 1);
+ signature = RSDT_SIG;
} else {
- no_match = ACPI_STRNCMP((char *)table_ptr, XSDT_SIG,
- sizeof(XSDT_SIG) - 1);
+ signature = XSDT_SIG;
}
- if (no_match) {
+ if (!ACPI_COMPARE_NAME(table_ptr->signature, signature)) {
+
/* Invalid RSDT or XSDT signature */
ACPI_ERROR((AE_INFO,
@@ -198,10 +201,8 @@ acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
ACPI_DUMP_BUFFER(acpi_gbl_RSDP, 20);
ACPI_ERROR((AE_INFO,
- "RSDT/XSDT signature at %X (%p) is invalid",
- acpi_gbl_RSDP->rsdt_physical_address,
- (void *)(acpi_native_uint) acpi_gbl_RSDP->
- rsdt_physical_address));
+ "RSDT/XSDT signature at %X is invalid",
+ acpi_gbl_RSDP->rsdt_physical_address));
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
ACPI_ERROR((AE_INFO, "Looking for RSDT"));
@@ -234,13 +235,13 @@ acpi_status acpi_tb_get_table_rsdt(void)
acpi_status status;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("tb_get_table_rsdt");
+ ACPI_FUNCTION_TRACE(tb_get_table_rsdt);
/* Get the RSDT/XSDT via the RSDP */
acpi_tb_get_rsdt_address(&address);
- table_info.type = ACPI_TABLE_XSDT;
+ table_info.type = ACPI_TABLE_ID_XSDT;
status = acpi_tb_get_table(&address, &table_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
@@ -274,12 +275,13 @@ acpi_status acpi_tb_get_table_rsdt(void)
/* Save the table pointers and allocation info */
- status = acpi_tb_init_table_descriptor(ACPI_TABLE_XSDT, &table_info);
+ status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_XSDT, &table_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- acpi_gbl_XSDT = ACPI_CAST_PTR(XSDT_DESCRIPTOR, table_info.pointer);
+ acpi_gbl_XSDT =
+ ACPI_CAST_PTR(struct xsdt_descriptor, table_info.pointer);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "XSDT located at %p\n", acpi_gbl_XSDT));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index bc571592f087..209a401801e3 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -71,7 +71,7 @@ acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc)
{
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("tb_is_table_installed");
+ ACPI_FUNCTION_TRACE(tb_is_table_installed);
/* Get the list descriptor and first table descriptor */
@@ -96,10 +96,11 @@ acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc)
(!ACPI_MEMCMP
(table_desc->pointer, new_table_desc->pointer,
new_table_desc->pointer->length))) {
+
/* Match: this table is already installed */
ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
- "Table [%4.4s] already installed: Rev %X oem_table_id [%8.8s]\n",
+ "Table [%4.4s] already installed: Rev %X OemTableId [%8.8s]\n",
new_table_desc->pointer->signature,
new_table_desc->pointer->revision,
new_table_desc->pointer->
@@ -159,12 +160,8 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
ACPI_MOVE_32_TO_32(&signature, table_header->signature);
if (!acpi_ut_valid_acpi_name(signature)) {
- ACPI_ERROR((AE_INFO,
- "Table signature at %p [%p] has invalid characters",
- table_header, &signature));
-
- ACPI_WARNING((AE_INFO, "Invalid table signature found: [%4.4s]",
- ACPI_CAST_PTR(char, &signature)));
+ ACPI_ERROR((AE_INFO, "Invalid table signature 0x%8.8X",
+ signature));
ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header));
@@ -175,12 +172,9 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
if (table_header->length < sizeof(struct acpi_table_header)) {
ACPI_ERROR((AE_INFO,
- "Invalid length in table header %p name %4.4s",
- table_header, (char *)&signature));
-
- ACPI_WARNING((AE_INFO,
- "Invalid table header length (0x%X) found",
- (u32) table_header->length));
+ "Invalid length 0x%X in table with signature %4.4s",
+ (u32) table_header->length,
+ ACPI_CAST_PTR(char, &signature)));
ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header));
@@ -192,72 +186,119 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
/*******************************************************************************
*
- * FUNCTION: acpi_tb_verify_table_checksum
+ * FUNCTION: acpi_tb_sum_table
*
- * PARAMETERS: *table_header - ACPI table to verify
+ * PARAMETERS: Buffer - Buffer to sum
+ * Length - Size of the buffer
*
- * RETURN: 8 bit checksum of table
+ * RETURN: 8 bit sum of buffer
*
- * DESCRIPTION: Does an 8 bit checksum of table and returns status. A correct
- * table should have a checksum of 0.
+ * DESCRIPTION: Computes an 8 bit sum of the buffer(length) and returns it.
*
******************************************************************************/
-acpi_status
-acpi_tb_verify_table_checksum(struct acpi_table_header * table_header)
+u8 acpi_tb_sum_table(void *buffer, u32 length)
+{
+ acpi_native_uint i;
+ u8 sum = 0;
+
+ if (!buffer || !length) {
+ return (0);
+ }
+
+ for (i = 0; i < length; i++) {
+ sum = (u8) (sum + ((u8 *) buffer)[i]);
+ }
+ return (sum);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_generate_checksum
+ *
+ * PARAMETERS: Table - Pointer to a valid ACPI table (with a
+ * standard ACPI header)
+ *
+ * RETURN: 8 bit checksum of buffer
+ *
+ * DESCRIPTION: Computes an 8 bit checksum of the table.
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_generate_checksum(struct acpi_table_header * table)
{
u8 checksum;
- acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("tb_verify_table_checksum");
+ /* Sum the entire table as-is */
- /* Compute the checksum on the table */
+ checksum = acpi_tb_sum_table(table, table->length);
- checksum =
- acpi_tb_generate_checksum(table_header, table_header->length);
+ /* Subtract off the existing checksum value in the table */
- /* Return the appropriate exception */
+ checksum = (u8) (checksum - table->checksum);
- if (checksum) {
- ACPI_WARNING((AE_INFO,
- "Invalid checksum in table [%4.4s] (%02X, sum %02X is not zero)",
- table_header->signature,
- (u32) table_header->checksum, (u32) checksum));
+ /* Compute the final checksum */
- status = AE_BAD_CHECKSUM;
- }
- return_ACPI_STATUS(status);
+ checksum = (u8) (0 - checksum);
+ return (checksum);
}
/*******************************************************************************
*
- * FUNCTION: acpi_tb_generate_checksum
+ * FUNCTION: acpi_tb_set_checksum
*
- * PARAMETERS: Buffer - Buffer to checksum
- * Length - Size of the buffer
+ * PARAMETERS: Table - Pointer to a valid ACPI table (with a
+ * standard ACPI header)
*
- * RETURN: 8 bit checksum of buffer
+ * RETURN: None. Sets the table checksum field
*
- * DESCRIPTION: Computes an 8 bit checksum of the buffer(length) and returns it.
+ * DESCRIPTION: Computes an 8 bit checksum of the table and inserts the
+ * checksum into the table header.
*
******************************************************************************/
-u8 acpi_tb_generate_checksum(void *buffer, u32 length)
+void acpi_tb_set_checksum(struct acpi_table_header *table)
{
- u8 *end_buffer;
- u8 *rover;
- u8 sum = 0;
- if (buffer && length) {
- /* Buffer and Length are valid */
+ table->checksum = acpi_tb_generate_checksum(table);
+}
- end_buffer = ACPI_ADD_PTR(u8, buffer, length);
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_verify_table_checksum
+ *
+ * PARAMETERS: *table_header - ACPI table to verify
+ *
+ * RETURN: Status (AE_OK if the stored checksum is correct)
+ *
+ * DESCRIPTION: Generates an 8 bit checksum of the table and compares it to
+ * the checksum value already stored in the table header.
+ *
+ ******************************************************************************/
- for (rover = buffer; rover < end_buffer; rover++) {
- sum = (u8) (sum + *rover);
- }
+acpi_status
+acpi_tb_verify_table_checksum(struct acpi_table_header *table_header)
+{
+ u8 checksum;
+
+ ACPI_FUNCTION_TRACE(tb_verify_table_checksum);
+
+ /* Compute the checksum on the table */
+
+ checksum = acpi_tb_generate_checksum(table_header);
+
+ /* Checksum ok? */
+
+ if (checksum == table_header->checksum) {
+ return_ACPI_STATUS(AE_OK);
}
- return (sum);
+
+ ACPI_WARNING((AE_INFO,
+ "Incorrect checksum in table [%4.4s] - is %2.2X, should be %2.2X",
+ table_header->signature, table_header->checksum,
+ checksum));
+
+ return_ACPI_STATUS(AE_BAD_CHECKSUM);
}
#ifdef ACPI_OBSOLETE_FUNCTIONS
@@ -276,12 +317,12 @@ u8 acpi_tb_generate_checksum(void *buffer, u32 length)
acpi_status
acpi_tb_handle_to_object(u16 table_id,
- struct acpi_table_desc ** return_table_desc)
+ struct acpi_table_desc **return_table_desc)
{
u32 i;
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_NAME("tb_handle_to_object");
+ ACPI_FUNCTION_NAME(tb_handle_to_object);
for (i = 0; i < ACPI_TABLE_MAX; i++) {
table_desc = acpi_gbl_table_lists[i].next;
@@ -295,7 +336,7 @@ acpi_tb_handle_to_object(u16 table_id,
}
}
- ACPI_ERROR((AE_INFO, "table_id=%X does not exist", table_id));
+ ACPI_ERROR((AE_INFO, "TableId=%X does not exist", table_id));
return (AE_BAD_PARAMETER);
}
#endif
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 9fe53c9d5b9a..4e91f2984815 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/actables.h>
@@ -68,7 +66,7 @@ acpi_status acpi_load_tables(void)
struct acpi_pointer rsdp_address;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_load_tables");
+ ACPI_FUNCTION_TRACE(acpi_load_tables);
/* Get the RSDP */
@@ -123,6 +121,8 @@ acpi_status acpi_load_tables(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_load_tables)
+
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -139,14 +139,13 @@ acpi_status acpi_load_tables(void)
* is determined that the table is invalid, the call will fail.
*
******************************************************************************/
-
acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
{
acpi_status status;
struct acpi_table_desc table_info;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("acpi_load_table");
+ ACPI_FUNCTION_TRACE(acpi_load_table);
if (!table_ptr) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -174,6 +173,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
status = acpi_tb_install_table(&table_info);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
+
/* Table already exists, no error */
status = AE_OK;
@@ -188,12 +188,12 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
/* Convert the table to common format if necessary */
switch (table_info.type) {
- case ACPI_TABLE_FADT:
+ case ACPI_TABLE_ID_FADT:
status = acpi_tb_convert_table_fadt();
break;
- case ACPI_TABLE_FACS:
+ case ACPI_TABLE_ID_FACS:
status = acpi_tb_build_common_facs(&table_info);
break;
@@ -208,6 +208,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
}
if (ACPI_FAILURE(status)) {
+
/* Uninstall table and free the buffer */
(void)acpi_tb_uninstall_table(table_info.installed_desc);
@@ -216,6 +217,8 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_load_table)
+
/*******************************************************************************
*
* FUNCTION: acpi_unload_table
@@ -227,16 +230,15 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
* DESCRIPTION: This routine is used to force the unload of a table
*
******************************************************************************/
-
acpi_status acpi_unload_table(acpi_table_type table_type)
{
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("acpi_unload_table");
+ ACPI_FUNCTION_TRACE(acpi_unload_table);
/* Parameter validation */
- if (table_type > ACPI_TABLE_MAX) {
+ if (table_type > ACPI_TABLE_ID_MAX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -261,6 +263,8 @@ acpi_status acpi_unload_table(acpi_table_type table_type)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_unload_table)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_table_header
@@ -281,7 +285,6 @@ acpi_status acpi_unload_table(acpi_table_type table_type)
* have a standard header and is fixed length.
*
******************************************************************************/
-
acpi_status
acpi_get_table_header(acpi_table_type table_type,
u32 instance, struct acpi_table_header *out_table_header)
@@ -289,16 +292,16 @@ acpi_get_table_header(acpi_table_type table_type,
struct acpi_table_header *tbl_ptr;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_get_table_header");
+ ACPI_FUNCTION_TRACE(acpi_get_table_header);
if ((instance == 0) ||
- (table_type == ACPI_TABLE_RSDP) || (!out_table_header)) {
+ (table_type == ACPI_TABLE_ID_RSDP) || (!out_table_header)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Check the table type and instance */
- if ((table_type > ACPI_TABLE_MAX) ||
+ if ((table_type > ACPI_TABLE_ID_MAX) ||
(ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) &&
instance > 1)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -325,6 +328,7 @@ acpi_get_table_header(acpi_table_type table_type,
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_table_header)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -349,7 +353,6 @@ acpi_get_table_header(acpi_table_type table_type,
* a complete table including the header.
*
******************************************************************************/
-
acpi_status
acpi_get_table(acpi_table_type table_type,
u32 instance, struct acpi_buffer *ret_buffer)
@@ -358,7 +361,7 @@ acpi_get_table(acpi_table_type table_type,
acpi_status status;
acpi_size table_length;
- ACPI_FUNCTION_TRACE("acpi_get_table");
+ ACPI_FUNCTION_TRACE(acpi_get_table);
/* Parameter validation */
@@ -373,7 +376,7 @@ acpi_get_table(acpi_table_type table_type,
/* Check the table type and instance */
- if ((table_type > ACPI_TABLE_MAX) ||
+ if ((table_type > ACPI_TABLE_ID_MAX) ||
(ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) &&
instance > 1)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -396,7 +399,8 @@ acpi_get_table(acpi_table_type table_type,
/* Get the table length */
- if (table_type == ACPI_TABLE_RSDP) {
+ if (table_type == ACPI_TABLE_ID_RSDP) {
+
/* RSD PTR is the only "table" without a header */
table_length = sizeof(struct rsdp_descriptor);
@@ -417,4 +421,4 @@ acpi_get_table(acpi_table_type table_type,
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_get_table);
+ACPI_EXPORT_SYMBOL(acpi_get_table)
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index a62db6af83c9..da2648bbdbc0 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/actables.h>
@@ -75,6 +73,7 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
* The signature and checksum must both be correct
*/
if (ACPI_STRNCMP((char *)rsdp, RSDP_SIG, sizeof(RSDP_SIG) - 1) != 0) {
+
/* Nope, BAD Signature */
return (AE_BAD_SIGNATURE);
@@ -82,15 +81,14 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
/* Check the standard checksum */
- if (acpi_tb_generate_checksum(rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
+ if (acpi_tb_sum_table(rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
return (AE_BAD_CHECKSUM);
}
/* Check extended checksum if table version >= 2 */
if ((rsdp->revision >= 2) &&
- (acpi_tb_generate_checksum(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) !=
- 0)) {
+ (acpi_tb_sum_table(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
return (AE_BAD_CHECKSUM);
}
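acpi_tb_sum_table() is reused here because the RSDP carries two checksums: one over the original 20-byte ACPI 1.0 structure (ACPI_RSDP_CHECKSUM_LENGTH) and, for revision 2 and later, one over the full 36-byte extended structure (ACPI_RSDP_XCHECKSUM_LENGTH); each region must sum to zero. The following stand-alone sketch mirrors that validation; the field layout follows the ACPI specification, but the constant names and helpers are local to the example.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RSDP_SIG              "RSD PTR "   /* 8 bytes, includes trailing space */
#define RSDP_CHECKSUM_LENGTH  20           /* ACPI 1.0 portion                 */
#define RSDP_XCHECKSUM_LENGTH 36           /* ACPI 2.0+ portion                */

#pragma pack(push, 1)
struct rsdp {
        char     signature[8];
        uint8_t  checksum;        /* covers the first 20 bytes */
        char     oem_id[6];
        uint8_t  revision;
        uint32_t rsdt_address;
        uint32_t length;
        uint64_t xsdt_address;
        uint8_t  ext_checksum;    /* covers the full 36 bytes  */
        uint8_t  reserved[3];
};
#pragma pack(pop)

static uint8_t sum_bytes(const void *buf, uint32_t len)
{
        const uint8_t *p = buf;
        uint8_t sum = 0;

        for (uint32_t i = 0; i < len; i++)
                sum = (uint8_t)(sum + p[i]);
        return sum;
}

static int validate_rsdp(const struct rsdp *r)
{
        if (memcmp(r->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1) != 0)
                return -1;                                /* bad signature         */

        if (sum_bytes(r, RSDP_CHECKSUM_LENGTH) != 0)
                return -2;                                /* bad 1.0 checksum      */

        if (r->revision >= 2 &&
            sum_bytes(r, RSDP_XCHECKSUM_LENGTH) != 0)
                return -3;                                /* bad extended checksum */

        return 0;
}

int main(void)
{
        struct rsdp r;

        memset(&r, 0, sizeof(r));
        memcpy(r.signature, RSDP_SIG, 8);
        r.revision = 2;
        r.length = sizeof(r);

        /* Fix up both checksum fields so the regions sum to zero. */
        r.checksum = (uint8_t)(0 - sum_bytes(&r, RSDP_CHECKSUM_LENGTH));
        r.ext_checksum = (uint8_t)(0 - sum_bytes(&r, RSDP_XCHECKSUM_LENGTH));

        printf("validate_rsdp() = %d\n", validate_rsdp(&r));
        return 0;
}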
@@ -121,7 +119,7 @@ acpi_tb_find_table(char *signature,
acpi_status status;
struct acpi_table_header *table;
- ACPI_FUNCTION_TRACE("tb_find_table");
+ ACPI_FUNCTION_TRACE(tb_find_table);
/* Validate string lengths */
@@ -131,7 +129,7 @@ acpi_tb_find_table(char *signature,
return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
- if (!ACPI_STRNCMP(signature, DSDT_SIG, ACPI_NAME_SIZE)) {
+ if (ACPI_COMPARE_NAME(signature, DSDT_SIG)) {
/*
* The DSDT pointer is contained in the FADT, not the RSDT.
* This code should suffice, because the only code that would perform
@@ -156,10 +154,12 @@ acpi_tb_find_table(char *signature,
/* Check oem_id and oem_table_id */
- if ((oem_id[0] && ACPI_STRNCMP(oem_id, table->oem_id,
- sizeof(table->oem_id))) ||
- (oem_table_id[0] && ACPI_STRNCMP(oem_table_id, table->oem_table_id,
- sizeof(table->oem_table_id)))) {
+ if ((oem_id[0] &&
+ ACPI_STRNCMP(oem_id, table->oem_id,
+ sizeof(table->oem_id))) ||
+ (oem_table_id[0] &&
+ ACPI_STRNCMP(oem_table_id, table->oem_table_id,
+ sizeof(table->oem_table_id)))) {
return_ACPI_STATUS(AE_AML_NAME_NOT_FOUND);
}
@@ -203,7 +203,7 @@ acpi_get_firmware_table(acpi_string signature,
u32 i;
u32 j;
- ACPI_FUNCTION_TRACE("acpi_get_firmware_table");
+ ACPI_FUNCTION_TRACE(acpi_get_firmware_table);
/*
* Ensure that at least the table manager is initialized. We don't
@@ -217,6 +217,7 @@ acpi_get_firmware_table(acpi_string signature,
/* Ensure that we have a RSDP */
if (!acpi_gbl_RSDP) {
+
/* Get the RSDP */
status = acpi_os_get_root_pointer(flags, &address);
@@ -261,7 +262,7 @@ acpi_get_firmware_table(acpi_string signature,
/* Get and validate the RSDT */
- rsdt_info = ACPI_MEM_CALLOCATE(sizeof(struct acpi_table_desc));
+ rsdt_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
if (!rsdt_info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -278,13 +279,13 @@ acpi_get_firmware_table(acpi_string signature,
/* Allocate a scratch table header and table descriptor */
- header = ACPI_MEM_ALLOCATE(sizeof(struct acpi_table_header));
+ header = ACPI_ALLOCATE(sizeof(struct acpi_table_header));
if (!header) {
status = AE_NO_MEMORY;
goto cleanup;
}
- table_info = ACPI_MEM_ALLOCATE(sizeof(struct acpi_table_desc));
+ table_info = ACPI_ALLOCATE(sizeof(struct acpi_table_desc));
if (!table_info) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -308,12 +309,12 @@ acpi_get_firmware_table(acpi_string signature,
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
address.pointer.value =
(ACPI_CAST_PTR
- (RSDT_DESCRIPTOR,
+ (struct rsdt_descriptor,
rsdt_info->pointer))->table_offset_entry[i];
} else {
address.pointer.value =
(ACPI_CAST_PTR
- (XSDT_DESCRIPTOR,
+ (struct xsdt_descriptor,
rsdt_info->pointer))->table_offset_entry[i];
}
@@ -326,11 +327,13 @@ acpi_get_firmware_table(acpi_string signature,
/* Compare table signatures and table instance */
- if (!ACPI_STRNCMP(header->signature, signature, ACPI_NAME_SIZE)) {
+ if (ACPI_COMPARE_NAME(header->signature, signature)) {
+
/* An instance of the table was found */
j++;
if (j >= instance) {
+
/* Found the correct instance, get the entire table */
status =
@@ -355,23 +358,21 @@ acpi_get_firmware_table(acpi_string signature,
acpi_os_unmap_memory(rsdt_info->pointer,
(acpi_size) rsdt_info->pointer->length);
}
- ACPI_MEM_FREE(rsdt_info);
+ ACPI_FREE(rsdt_info);
if (header) {
- ACPI_MEM_FREE(header);
+ ACPI_FREE(header);
}
if (table_info) {
- ACPI_MEM_FREE(table_info);
+ ACPI_FREE(table_info);
}
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_firmware_table);
+ACPI_EXPORT_SYMBOL(acpi_get_firmware_table)
/* TBD: Move to a new file */
-
#if ACPI_MACHINE_WIDTH != 16
-
/*******************************************************************************
*
* FUNCTION: acpi_find_root_pointer
@@ -384,13 +385,12 @@ EXPORT_SYMBOL(acpi_get_firmware_table);
* DESCRIPTION: Find the RSDP
*
******************************************************************************/
-
acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
{
struct acpi_table_desc table_info;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_find_root_pointer");
+ ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
/* Get the RSDP */
@@ -407,6 +407,8 @@ acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_find_root_pointer)
+
/*******************************************************************************
*
* FUNCTION: acpi_tb_scan_memory_for_rsdp
@@ -419,14 +421,13 @@ acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
* DESCRIPTION: Search a block of memory for the RSDP signature
*
******************************************************************************/
-
static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
{
acpi_status status;
u8 *mem_rover;
u8 *end_address;
- ACPI_FUNCTION_TRACE("tb_scan_memory_for_rsdp");
+ ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp);
end_address = start_address + length;
@@ -434,12 +435,14 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
for (mem_rover = start_address; mem_rover < end_address;
mem_rover += ACPI_RSDP_SCAN_STEP) {
+
/* The RSDP signature and checksum must both be correct */
status =
acpi_tb_validate_rsdp(ACPI_CAST_PTR
(struct rsdp_descriptor, mem_rover));
if (ACPI_SUCCESS(status)) {
+
/* Sig and checksum valid, we have found a real RSDP */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -469,10 +472,10 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
*
* RETURN: Status, RSDP physical address
*
- * DESCRIPTION: search lower 1_mbyte of memory for the root system descriptor
+ * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
* pointer structure. If it is found, set *RSDP to point to it.
*
- * NOTE1: The RSDp must be either in the first 1_k of the Extended
+ * NOTE1: The RSDP must be either in the first 1_k of the Extended
* BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
* Only a 32-bit physical address is necessary.
*
@@ -489,12 +492,13 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
u32 physical_address;
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_find_rsdp");
+ ACPI_FUNCTION_TRACE(tb_find_rsdp);
/*
* Scan supports either logical addressing or physical addressing
*/
if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
+
/* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
status = acpi_os_map_memory((acpi_physical_address)
@@ -521,7 +525,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
if (physical_address > 0x400) {
/*
- * 1b) Search EBDA paragraphs (EBDa is required to be a
+ * 1b) Search EBDA paragraphs (EBDA is required to be a
* minimum of 1_k length)
*/
status = acpi_os_map_memory((acpi_physical_address)
@@ -542,10 +546,11 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
if (mem_rover) {
+
/* Return the physical address */
physical_address +=
- ACPI_PTR_DIFF(mem_rover, table_ptr);
+ (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
table_info->physical_address =
(acpi_physical_address) physical_address;
@@ -576,11 +581,12 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
if (mem_rover) {
+
/* Return the physical address */
- physical_address =
- ACPI_HI_RSDP_WINDOW_BASE + ACPI_PTR_DIFF(mem_rover,
- table_ptr);
+ physical_address = (u32)
+ (ACPI_HI_RSDP_WINDOW_BASE +
+ ACPI_PTR_DIFF(mem_rover, table_ptr));
table_info->physical_address =
(acpi_physical_address) physical_address;
@@ -601,7 +607,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
if (physical_address > 0x400) {
/*
- * 1b) Search EBDA paragraphs (EBDa is required to be a minimum of
+ * 1b) Search EBDA paragraphs (EBDA is required to be a minimum of
* 1_k length)
*/
mem_rover =
@@ -609,6 +615,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
(physical_address),
ACPI_EBDA_WINDOW_SIZE);
if (mem_rover) {
+
/* Return the physical address */
table_info->physical_address =
@@ -624,6 +631,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
(ACPI_HI_RSDP_WINDOW_BASE),
ACPI_HI_RSDP_WINDOW_SIZE);
if (mem_rover) {
+
/* Found it, return the physical address */
table_info->physical_address =
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 19f3ea48475e..e7fe3a14fdaf 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -82,6 +82,7 @@ MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.\n");
static int acpi_thermal_add(struct acpi_device *device);
static int acpi_thermal_remove(struct acpi_device *device, int type);
+static int acpi_thermal_resume(struct acpi_device *device, int state);
static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
@@ -103,6 +104,7 @@ static struct acpi_driver acpi_thermal_driver = {
.ops = {
.add = acpi_thermal_add,
.remove = acpi_thermal_remove,
+ .resume = acpi_thermal_resume,
},
};
@@ -684,8 +686,7 @@ static void acpi_thermal_run(unsigned long data)
{
struct acpi_thermal *tz = (struct acpi_thermal *)data;
if (!tz->zombie)
- acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
- acpi_thermal_check, (void *)data);
+ acpi_os_execute(OSL_GPE_HANDLER, acpi_thermal_check, (void *)data);
}
static void acpi_thermal_check(void *data)
@@ -942,8 +943,10 @@ acpi_thermal_write_trip_points(struct file *file,
memset(limit_string, 0, ACPI_THERMAL_MAX_LIMIT_STR_LEN);
active = kmalloc(ACPI_THERMAL_MAX_ACTIVE * sizeof(int), GFP_KERNEL);
- if (!active)
+ if (!active) {
+ kfree(limit_string);
return_VALUE(-ENOMEM);
+ }
if (!tz || (count > ACPI_THERMAL_MAX_LIMIT_STR_LEN - 1)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n"));
@@ -1342,7 +1345,7 @@ static int acpi_thermal_add(struct acpi_device *device)
result = acpi_thermal_add_fs(device);
if (result)
- return_VALUE(result);
+ goto end;
init_timer(&tz->timer);
@@ -1416,6 +1419,20 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
return_VALUE(0);
}
+static int acpi_thermal_resume(struct acpi_device *device, int state)
+{
+ struct acpi_thermal *tz = NULL;
+
+ if (!device || !acpi_driver_data(device))
+ return_VALUE(-EINVAL);
+
+ tz = (struct acpi_thermal *)acpi_driver_data(device);
+
+ acpi_thermal_check(tz);
+
+ return AE_OK;
+}
+
static int __init acpi_thermal_init(void)
{
int result = 0;
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 03b0044974c2..7940fc1bd69e 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -46,24 +46,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utalloc")
-/* Local prototypes */
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-static struct acpi_debug_mem_block *acpi_ut_find_allocation(void *allocation);
-
-static acpi_status
-acpi_ut_track_allocation(struct acpi_debug_mem_block *address,
- acpi_size size,
- u8 alloc_type, u32 component, char *module, u32 line);
-
-static acpi_status
-acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
- u32 component, char *module, u32 line);
-
-static acpi_status
-acpi_ut_create_list(char *list_name,
- u16 object_size, struct acpi_memory_list **return_cache);
-#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_caches
@@ -75,33 +57,23 @@ acpi_ut_create_list(char *list_name,
* DESCRIPTION: Create all local caches
*
******************************************************************************/
-
acpi_status acpi_ut_create_caches(void)
{
acpi_status status;
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
- /* Memory allocation lists */
-
- status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ /* Object Caches, for frequently used objects */
status =
- acpi_ut_create_list("Acpi-Namespace",
- sizeof(struct acpi_namespace_node),
- &acpi_gbl_ns_node_list);
+ acpi_os_create_cache("Acpi-Namespace",
+ sizeof(struct acpi_namespace_node),
+ ACPI_MAX_NAMESPACE_CACHE_DEPTH,
+ &acpi_gbl_namespace_cache);
if (ACPI_FAILURE(status)) {
return (status);
}
-#endif
-
- /* Object Caches, for frequently used objects */
status =
- acpi_os_create_cache("acpi_state", sizeof(union acpi_generic_state),
+ acpi_os_create_cache("Acpi-State", sizeof(union acpi_generic_state),
ACPI_MAX_STATE_CACHE_DEPTH,
&acpi_gbl_state_cache);
if (ACPI_FAILURE(status)) {
@@ -109,7 +81,7 @@ acpi_status acpi_ut_create_caches(void)
}
status =
- acpi_os_create_cache("acpi_parse",
+ acpi_os_create_cache("Acpi-Parse",
sizeof(struct acpi_parse_obj_common),
ACPI_MAX_PARSE_CACHE_DEPTH,
&acpi_gbl_ps_node_cache);
@@ -118,7 +90,7 @@ acpi_status acpi_ut_create_caches(void)
}
status =
- acpi_os_create_cache("acpi_parse_ext",
+ acpi_os_create_cache("Acpi-ParseExt",
sizeof(struct acpi_parse_obj_named),
ACPI_MAX_EXTPARSE_CACHE_DEPTH,
&acpi_gbl_ps_node_ext_cache);
@@ -127,7 +99,7 @@ acpi_status acpi_ut_create_caches(void)
}
status =
- acpi_os_create_cache("acpi_operand",
+ acpi_os_create_cache("Acpi-Operand",
sizeof(union acpi_operand_object),
ACPI_MAX_OBJECT_CACHE_DEPTH,
&acpi_gbl_operand_cache);
@@ -135,6 +107,24 @@ acpi_status acpi_ut_create_caches(void)
return (status);
}
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+ /* Memory allocation lists */
+
+ status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status =
+ acpi_ut_create_list("Acpi-Namespace",
+ sizeof(struct acpi_namespace_node),
+ &acpi_gbl_ns_node_list);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+#endif
+
return (AE_OK);
}
@@ -153,6 +143,9 @@ acpi_status acpi_ut_create_caches(void)
acpi_status acpi_ut_delete_caches(void)
{
+ (void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
+ acpi_gbl_namespace_cache = NULL;
+
(void)acpi_os_delete_cache(acpi_gbl_state_cache);
acpi_gbl_state_cache = NULL;
@@ -165,6 +158,21 @@ acpi_status acpi_ut_delete_caches(void)
(void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache);
acpi_gbl_ps_node_ext_cache = NULL;
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+ /* Debug only - display leftover memory allocation, if any */
+
+ acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
+
+ /* Free memory lists */
+
+ acpi_os_free(acpi_gbl_global_list);
+ acpi_gbl_global_list = NULL;
+
+ acpi_os_free(acpi_gbl_ns_node_list);
+ acpi_gbl_ns_node_list = NULL;
+#endif
+
return (AE_OK);
}
@@ -252,7 +260,7 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
/* Allocate a new buffer with local interface to allow tracking */
- buffer->pointer = ACPI_MEM_CALLOCATE(required_length);
+ buffer->pointer = ACPI_ALLOCATE_ZEROED(required_length);
if (!buffer->pointer) {
return (AE_NO_MEMORY);
}
@@ -288,7 +296,7 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
- * DESCRIPTION: The subsystem's equivalent of malloc.
+ * DESCRIPTION: Subsystem equivalent of malloc.
*
******************************************************************************/
@@ -296,23 +304,23 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line)
{
void *allocation;
- ACPI_FUNCTION_TRACE_U32("ut_allocate", size);
+ ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
/* Check for an inadvertent size of zero bytes */
if (!size) {
- ACPI_ERROR((module, line,
- "ut_allocate: Attempt to allocate zero bytes, allocating 1 byte"));
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
size = 1;
}
allocation = acpi_os_allocate(size);
if (!allocation) {
+
/* Report allocation error */
- ACPI_ERROR((module, line,
- "ut_allocate: Could not allocate size %X",
- (u32) size));
+ ACPI_WARNING((module, line,
+ "Could not allocate size %X", (u32) size));
return_PTR(NULL);
}
@@ -322,7 +330,7 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_callocate
+ * FUNCTION: acpi_ut_allocate_zeroed
*
* PARAMETERS: Size - Size of the allocation
* Component - Component type of caller
@@ -331,542 +339,24 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line)
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
- * DESCRIPTION: Subsystem equivalent of calloc.
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
*
******************************************************************************/
-void *acpi_ut_callocate(acpi_size size, u32 component, char *module, u32 line)
+void *acpi_ut_allocate_zeroed(acpi_size size,
+ u32 component, char *module, u32 line)
{
void *allocation;
- ACPI_FUNCTION_TRACE_U32("ut_callocate", size);
-
- /* Check for an inadvertent size of zero bytes */
-
- if (!size) {
- ACPI_ERROR((module, line,
- "Attempt to allocate zero bytes, allocating 1 byte"));
- size = 1;
- }
-
- allocation = acpi_os_allocate(size);
- if (!allocation) {
- /* Report allocation error */
-
- ACPI_ERROR((module, line,
- "Could not allocate size %X", (u32) size));
- return_PTR(NULL);
- }
-
- /* Clear the memory block */
-
- ACPI_MEMSET(allocation, 0, size);
- return_PTR(allocation);
-}
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-/*
- * These procedures are used for tracking memory leaks in the subsystem, and
- * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set.
- *
- * Each memory allocation is tracked via a doubly linked list. Each
- * element contains the caller's component, module name, function name, and
- * line number. acpi_ut_allocate and acpi_ut_callocate call
- * acpi_ut_track_allocation to add an element to the list; deletion
- * occurs in the body of acpi_ut_free.
- */
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_create_list
- *
- * PARAMETERS: cache_name - Ascii name for the cache
- * object_size - Size of each cached object
- * return_cache - Where the new cache object is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Create a local memory list for tracking purposed
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_create_list(char *list_name,
- u16 object_size, struct acpi_memory_list **return_cache)
-{
- struct acpi_memory_list *cache;
-
- cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
- if (!cache) {
- return (AE_NO_MEMORY);
- }
-
- ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
-
- cache->list_name = list_name;
- cache->object_size = object_size;
-
- *return_cache = cache;
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate_and_track
- *
- * PARAMETERS: Size - Size of the allocation
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: The subsystem's equivalent of malloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_and_track(acpi_size size,
- u32 component, char *module, u32 line)
-{
- struct acpi_debug_mem_block *allocation;
- acpi_status status;
-
- allocation =
- acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
- if (!allocation) {
- return (NULL);
- }
-
- status = acpi_ut_track_allocation(allocation, size,
- ACPI_MEM_MALLOC, component, module,
- line);
- if (ACPI_FAILURE(status)) {
- acpi_os_free(allocation);
- return (NULL);
- }
-
- acpi_gbl_global_list->total_allocated++;
- acpi_gbl_global_list->current_total_size += (u32) size;
-
- return ((void *)&allocation->user_space);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_callocate_and_track
- *
- * PARAMETERS: Size - Size of the allocation
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of calloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_callocate_and_track(acpi_size size,
- u32 component, char *module, u32 line)
-{
- struct acpi_debug_mem_block *allocation;
- acpi_status status;
-
- allocation =
- acpi_ut_callocate(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
- if (!allocation) {
- /* Report allocation error */
-
- ACPI_ERROR((module, line,
- "Could not allocate size %X", (u32) size));
- return (NULL);
- }
-
- status = acpi_ut_track_allocation(allocation, size,
- ACPI_MEM_CALLOC, component, module,
- line);
- if (ACPI_FAILURE(status)) {
- acpi_os_free(allocation);
- return (NULL);
- }
-
- acpi_gbl_global_list->total_allocated++;
- acpi_gbl_global_list->current_total_size += (u32) size;
-
- return ((void *)&allocation->user_space);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_free_and_track
- *
- * PARAMETERS: Allocation - Address of the memory to deallocate
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: None
- *
- * DESCRIPTION: Frees the memory at Allocation
- *
- ******************************************************************************/
-
-void
-acpi_ut_free_and_track(void *allocation, u32 component, char *module, u32 line)
-{
- struct acpi_debug_mem_block *debug_block;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE_PTR("ut_free", allocation);
-
- if (NULL == allocation) {
- ACPI_ERROR((module, line, "Attempt to delete a NULL address"));
-
- return_VOID;
- }
-
- debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block,
- (((char *)allocation) -
- sizeof(struct acpi_debug_mem_header)));
-
- acpi_gbl_global_list->total_freed++;
- acpi_gbl_global_list->current_total_size -= debug_block->size;
-
- status = acpi_ut_remove_allocation(debug_block,
- component, module, line);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Could not free memory"));
- }
-
- acpi_os_free(debug_block);
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_find_allocation
- *
- * PARAMETERS: Allocation - Address of allocated memory
- *
- * RETURN: A list element if found; NULL otherwise.
- *
- * DESCRIPTION: Searches for an element in the global allocation tracking list.
- *
- ******************************************************************************/
-
-static struct acpi_debug_mem_block *acpi_ut_find_allocation(void *allocation)
-{
- struct acpi_debug_mem_block *element;
-
ACPI_FUNCTION_ENTRY();
- element = acpi_gbl_global_list->list_head;
-
- /* Search for the address. */
-
- while (element) {
- if (element == allocation) {
- return (element);
- }
-
- element = element->next;
- }
-
- return (NULL);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_track_allocation
- *
- * PARAMETERS: Allocation - Address of allocated memory
- * Size - Size of the allocation
- * alloc_type - MEM_MALLOC or MEM_CALLOC
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: None.
- *
- * DESCRIPTION: Inserts an element into the global allocation tracking list.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
- acpi_size size,
- u8 alloc_type, u32 component, char *module, u32 line)
-{
- struct acpi_memory_list *mem_list;
- struct acpi_debug_mem_block *element;
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE_PTR("ut_track_allocation", allocation);
-
- mem_list = acpi_gbl_global_list;
- status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Search list for this address to make sure it is not already on the list.
- * This will catch several kinds of problems.
- */
- element = acpi_ut_find_allocation(allocation);
- if (element) {
- ACPI_ERROR((AE_INFO,
- "ut_track_allocation: Allocation already present in list! (%p)",
- allocation));
-
- ACPI_ERROR((AE_INFO, "Element %p Address %p",
- element, allocation));
-
- goto unlock_and_exit;
- }
-
- /* Fill in the instance data. */
-
- allocation->size = (u32) size;
- allocation->alloc_type = alloc_type;
- allocation->component = component;
- allocation->line = line;
-
- ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
- allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
-
- /* Insert at list head */
-
- if (mem_list->list_head) {
- ((struct acpi_debug_mem_block *)(mem_list->list_head))->
- previous = allocation;
- }
-
- allocation->next = mem_list->list_head;
- allocation->previous = NULL;
-
- mem_list->list_head = allocation;
-
- unlock_and_exit:
- status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_remove_allocation
- *
- * PARAMETERS: Allocation - Address of allocated memory
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN:
- *
- * DESCRIPTION: Deletes an element from the global allocation tracking list.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
- u32 component, char *module, u32 line)
-{
- struct acpi_memory_list *mem_list;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE("ut_remove_allocation");
-
- mem_list = acpi_gbl_global_list;
- if (NULL == mem_list->list_head) {
- /* No allocations! */
-
- ACPI_ERROR((module, line,
- "Empty allocation list, nothing to free!"));
-
- return_ACPI_STATUS(AE_OK);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Unlink */
-
- if (allocation->previous) {
- (allocation->previous)->next = allocation->next;
- } else {
- mem_list->list_head = allocation->next;
- }
-
- if (allocation->next) {
- (allocation->next)->previous = allocation->previous;
- }
-
- /* Mark the segment as deleted */
-
- ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
-
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
- allocation->size));
+ allocation = acpi_ut_allocate(size, component, module, line);
+ if (allocation) {
- status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_dump_allocation_info
- *
- * PARAMETERS:
- *
- * RETURN: None
- *
- * DESCRIPTION: Print some info about the outstanding allocations.
- *
- ******************************************************************************/
+ /* Clear the memory block */
-#ifdef ACPI_FUTURE_USAGE
-void acpi_ut_dump_allocation_info(void)
-{
-/*
- struct acpi_memory_list *mem_list;
-*/
-
- ACPI_FUNCTION_TRACE("ut_dump_allocation_info");
-
-/*
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Current allocations",
- mem_list->current_count,
- ROUND_UP_TO_1K (mem_list->current_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations",
- mem_list->max_concurrent_count,
- ROUND_UP_TO_1K (mem_list->max_concurrent_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects",
- running_object_count,
- ROUND_UP_TO_1K (running_object_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Total (all) allocations",
- running_alloc_count,
- ROUND_UP_TO_1K (running_alloc_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Current Nodes",
- acpi_gbl_current_node_count,
- ROUND_UP_TO_1K (acpi_gbl_current_node_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Max Nodes",
- acpi_gbl_max_concurrent_node_count,
- ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count *
- sizeof (struct acpi_namespace_node)))));
-*/
- return_VOID;
-}
-#endif /* ACPI_FUTURE_USAGE */
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_dump_allocations
- *
- * PARAMETERS: Component - Component(s) to dump info for.
- * Module - Module to dump info for. NULL means all.
- *
- * RETURN: None
- *
- * DESCRIPTION: Print a list of all outstanding allocations.
- *
- ******************************************************************************/
-
-void acpi_ut_dump_allocations(u32 component, char *module)
-{
- struct acpi_debug_mem_block *element;
- union acpi_descriptor *descriptor;
- u32 num_outstanding = 0;
-
- ACPI_FUNCTION_TRACE("ut_dump_allocations");
-
- /*
- * Walk the allocation list.
- */
- if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) {
- return;
+ ACPI_MEMSET(allocation, 0, size);
}
- element = acpi_gbl_global_list->list_head;
- while (element) {
- if ((element->component & component) &&
- ((module == NULL)
- || (0 == ACPI_STRCMP(module, element->module)))) {
- /* Ignore allocated objects that are in a cache */
-
- descriptor =
- ACPI_CAST_PTR(union acpi_descriptor,
- &element->user_space);
- if (descriptor->descriptor_id != ACPI_DESC_TYPE_CACHED) {
- acpi_os_printf("%p Len %04X %9.9s-%d [%s] ",
- descriptor, element->size,
- element->module, element->line,
- acpi_ut_get_descriptor_name
- (descriptor));
-
- /* Most of the elements will be Operand objects. */
-
- switch (ACPI_GET_DESCRIPTOR_TYPE(descriptor)) {
- case ACPI_DESC_TYPE_OPERAND:
- acpi_os_printf("%12.12s R%hd",
- acpi_ut_get_type_name
- (descriptor->object.
- common.type),
- descriptor->object.
- common.reference_count);
- break;
-
- case ACPI_DESC_TYPE_PARSER:
- acpi_os_printf("aml_opcode %04hX",
- descriptor->op.asl.
- aml_opcode);
- break;
-
- case ACPI_DESC_TYPE_NAMED:
- acpi_os_printf("%4.4s",
- acpi_ut_get_node_name
- (&descriptor->node));
- break;
-
- default:
- break;
- }
-
- acpi_os_printf("\n");
- num_outstanding++;
- }
- }
- element = element->next;
- }
-
- (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
-
- /* Print summary */
-
- if (!num_outstanding) {
- ACPI_INFO((AE_INFO, "No outstanding allocations"));
- } else {
- ACPI_ERROR((AE_INFO,
- "%d(%X) Outstanding allocations",
- num_outstanding, num_outstanding));
- }
-
- return_VOID;
+ return (allocation);
}
-
-#endif /* #ifdef ACPI_DBG_TRACK_ALLOCATIONS */
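
The utalloc.c rework above removes acpi_ut_callocate() and the in-file ACPI_DBG_TRACK_ALLOCATIONS machinery, and introduces acpi_ut_allocate_zeroed(), which is just the plain allocator followed by a memset on success (the zero-size bump to one byte happens in the underlying acpi_ut_allocate()). A standalone sketch of that wrapper shape, with generic names:

#include <stdlib.h>
#include <string.h>

/* Illustrative sketch: the calloc-equivalent reduced to a thin wrapper
 * around the plain allocator, mirroring acpi_ut_allocate_zeroed(). */
static void *allocate_zeroed(size_t size)
{
	void *p = malloc(size ? size : 1);	/* underlying allocator bumps 0 to 1 */

	if (p)
		memset(p, 0, size);
	return p;
}
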
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 2177cb1ef2c4..56270a30718a 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -118,13 +118,14 @@ acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
/* Walk the list of objects in this cache */
while (cache->list_head) {
+
/* Delete and unlink one cached state object */
next = *(ACPI_CAST_INDIRECT_PTR(char,
&(((char *)cache->
list_head)[cache->
link_offset])));
- ACPI_MEM_FREE(cache->list_head);
+ ACPI_FREE(cache->list_head);
cache->list_head = next;
cache->current_depth--;
@@ -193,7 +194,7 @@ acpi_os_release_object(struct acpi_memory_list * cache, void *object)
/* If cache is full, just free this object */
if (cache->current_depth >= cache->max_depth) {
- ACPI_MEM_FREE(object);
+ ACPI_FREE(object);
ACPI_MEM_TRACKING(cache->total_freed++);
}
@@ -243,7 +244,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
acpi_status status;
void *object;
- ACPI_FUNCTION_NAME("os_acquire_object");
+ ACPI_FUNCTION_NAME(os_acquire_object);
if (!cache) {
return (NULL);
@@ -259,6 +260,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
/* Check the cache first */
if (cache->list_head) {
+
/* There is an object available, use it */
object = cache->list_head;
@@ -270,9 +272,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
cache->current_depth--;
ACPI_MEM_TRACKING(cache->hits++);
- ACPI_MEM_TRACKING(ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Object %p from %s cache\n",
- object, cache->list_name)));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Object %p from %s cache\n", object,
+ cache->list_name));
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
@@ -287,14 +289,14 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
ACPI_MEM_TRACKING(cache->total_allocated++);
- /* Avoid deadlock with ACPI_MEM_CALLOCATE */
+ /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
return (NULL);
}
- object = ACPI_MEM_CALLOCATE(cache->object_size);
+ object = ACPI_ALLOCATE_ZEROED(cache->object_size);
if (!object) {
return (NULL);
}
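
In utcache.c above, object acquisition pops from the cache's list head when one is available and otherwise falls back to a zeroed allocation, while release frees the object outright once current_depth has reached max_depth. A minimal standalone free-list sketch of that acquire/release shape (a simplification, not the ACPICA code; it assumes obj_size >= sizeof(struct node)):

#include <stdlib.h>
#include <string.h>

struct node { struct node *next; };

struct cache {
	struct node *head;
	size_t obj_size;
	unsigned int depth, max_depth;
};

/* Pop from the free list if possible, else fall back to a zeroed allocation. */
static void *cache_acquire(struct cache *c)
{
	if (c->head) {
		struct node *n = c->head;

		c->head = n->next;
		c->depth--;
		memset(n, 0, c->obj_size);	/* hand back a clean object */
		return n;
	}
	return calloc(1, c->obj_size);
}

/* Push back onto the free list unless the cache is already full. */
static void cache_release(struct cache *c, void *obj)
{
	if (c->depth >= c->max_depth) {
		free(obj);
		return;
	}
	((struct node *)obj)->next = c->head;
	c->head = obj;
	c->depth++;
}
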
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index df2d32096b72..5e1a80d1bc36 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -109,7 +109,7 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ut_copy_isimple_to_esimple");
+ ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);
*buffer_space_used = 0;
@@ -325,7 +325,7 @@ acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
acpi_status status;
struct acpi_pkg_info info;
- ACPI_FUNCTION_TRACE("ut_copy_ipackage_to_epackage");
+ ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);
/*
* First package at head of the buffer
@@ -383,7 +383,7 @@ acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_copy_iobject_to_eobject");
+ ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);
if (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE) {
/*
@@ -442,7 +442,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
{
union acpi_operand_object *internal_object;
- ACPI_FUNCTION_TRACE("ut_copy_esimple_to_isimple");
+ ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);
/*
* Simple types supported are: String, Buffer, Integer
@@ -472,8 +472,8 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
case ACPI_TYPE_STRING:
internal_object->string.pointer =
- ACPI_MEM_CALLOCATE((acpi_size) external_object->string.
- length + 1);
+ ACPI_ALLOCATE_ZEROED((acpi_size) external_object->string.
+ length + 1);
if (!internal_object->string.pointer) {
goto error_exit;
}
@@ -488,7 +488,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
case ACPI_TYPE_BUFFER:
internal_object->buffer.pointer =
- ACPI_MEM_CALLOCATE(external_object->buffer.length);
+ ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
if (!internal_object->buffer.pointer) {
goto error_exit;
}
@@ -552,7 +552,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_operand_object *internal_object,
union acpi_operand_object *this_internal_obj;
union acpi_object *this_external_obj;
- ACPI_FUNCTION_TRACE("ut_copy_epackage_to_ipackage");
+ ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
/*
* First package at head of the buffer
@@ -600,7 +600,7 @@ acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_copy_eobject_to_iobject");
+ ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);
if (external_object->type == ACPI_TYPE_PACKAGE) {
/*
@@ -676,7 +676,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
if ((source_desc->buffer.pointer) &&
(source_desc->buffer.length)) {
dest_desc->buffer.pointer =
- ACPI_MEM_ALLOCATE(source_desc->buffer.length);
+ ACPI_ALLOCATE(source_desc->buffer.length);
if (!dest_desc->buffer.pointer) {
return (AE_NO_MEMORY);
}
@@ -697,8 +697,8 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
*/
if (source_desc->string.pointer) {
dest_desc->string.pointer =
- ACPI_MEM_ALLOCATE((acpi_size) source_desc->string.
- length + 1);
+ ACPI_ALLOCATE((acpi_size) source_desc->string.
+ length + 1);
if (!dest_desc->string.pointer) {
return (AE_NO_MEMORY);
}
@@ -805,9 +805,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
/*
* Create the object array
*/
- target_object->package.elements =
- ACPI_MEM_CALLOCATE(((acpi_size) source_object->package.
- count + 1) * sizeof(void *));
+ target_object->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.count + 1) * sizeof(void *));
if (!target_object->package.elements) {
status = AE_NO_MEMORY;
goto error_exit;
@@ -856,7 +854,7 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ut_copy_ipackage_to_ipackage");
+ ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);
dest_obj->common.type = ACPI_GET_OBJECT_TYPE(source_obj);
dest_obj->common.flags = source_obj->common.flags;
@@ -865,10 +863,10 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
/*
* Create the object array and walk the source package tree
*/
- dest_obj->package.elements = ACPI_MEM_CALLOCATE(((acpi_size)
- source_obj->package.
- count +
- 1) * sizeof(void *));
+ dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ source_obj->package.
+ count +
+ 1) * sizeof(void *));
if (!dest_obj->package.elements) {
ACPI_ERROR((AE_INFO, "Package allocation failure"));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -882,6 +880,7 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
acpi_ut_copy_ielement_to_ielement,
walk_state);
if (ACPI_FAILURE(status)) {
+
/* On failure, delete the destination package object */
acpi_ut_remove_reference(dest_obj);
@@ -911,7 +910,7 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ut_copy_iobject_to_iobject");
+ ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);
/* Create the top level object */
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 35f3d581e034..5ec1cfcc611d 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#define _COMPONENT ACPI_UTILITIES
@@ -123,12 +121,14 @@ static const char *acpi_ut_trim_function_name(const char *function_name)
/* All Function names are longer than 4 chars, check is safe */
if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_MIXED) {
+
/* This is the case where the original source has not been modified */
return (function_name + 4);
}
if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_LOWER) {
+
/* This is the case where the source has been 'linuxized' */
return (function_name + 5);
@@ -162,7 +162,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
const char *function_name,
char *module_name, u32 component_id, char *format, ...)
{
- u32 thread_id;
+ acpi_thread_id thread_id;
va_list args;
/*
@@ -177,7 +177,6 @@ acpi_ut_debug_print(u32 requested_debug_level,
* Thread tracking and context switch notification
*/
thread_id = acpi_os_get_thread_id();
-
if (thread_id != acpi_gbl_prev_thread_id) {
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
@@ -206,7 +205,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
acpi_os_vprintf(format, args);
}
-EXPORT_SYMBOL(acpi_ut_debug_print);
+ACPI_EXPORT_SYMBOL(acpi_ut_debug_print)
/*******************************************************************************
*
@@ -226,7 +225,6 @@ EXPORT_SYMBOL(acpi_ut_debug_print);
* debug_print so that the same macros can be used.
*
******************************************************************************/
-
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_debug_print_raw(u32 requested_debug_level,
u32 line_number,
@@ -244,7 +242,7 @@ acpi_ut_debug_print_raw(u32 requested_debug_level,
acpi_os_vprintf(format, args);
}
-EXPORT_SYMBOL(acpi_ut_debug_print_raw);
+ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw)
/*******************************************************************************
*
@@ -261,7 +259,6 @@ EXPORT_SYMBOL(acpi_ut_debug_print_raw);
* set in debug_level
*
******************************************************************************/
-
void
acpi_ut_trace(u32 line_number,
const char *function_name, char *module_name, u32 component_id)
@@ -275,7 +272,7 @@ acpi_ut_trace(u32 line_number,
component_id, "%s\n", acpi_gbl_fn_entry_str);
}
-EXPORT_SYMBOL(acpi_ut_trace);
+ACPI_EXPORT_SYMBOL(acpi_ut_trace)
/*******************************************************************************
*
@@ -293,7 +290,6 @@ EXPORT_SYMBOL(acpi_ut_trace);
* set in debug_level
*
******************************************************************************/
-
void
acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
@@ -400,7 +396,7 @@ acpi_ut_exit(u32 line_number,
acpi_gbl_nesting_level--;
}
-EXPORT_SYMBOL(acpi_ut_exit);
+ACPI_EXPORT_SYMBOL(acpi_ut_exit)
/*******************************************************************************
*
@@ -418,7 +414,6 @@ EXPORT_SYMBOL(acpi_ut_exit);
* set in debug_level. Prints exit status also.
*
******************************************************************************/
-
void
acpi_ut_status_exit(u32 line_number,
const char *function_name,
@@ -442,7 +437,7 @@ acpi_ut_status_exit(u32 line_number,
acpi_gbl_nesting_level--;
}
-EXPORT_SYMBOL(acpi_ut_status_exit);
+ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
/*******************************************************************************
*
@@ -460,7 +455,6 @@ EXPORT_SYMBOL(acpi_ut_status_exit);
* set in debug_level. Prints exit value also.
*
******************************************************************************/
-
void
acpi_ut_value_exit(u32 line_number,
const char *function_name,
@@ -475,7 +469,7 @@ acpi_ut_value_exit(u32 line_number,
acpi_gbl_nesting_level--;
}
-EXPORT_SYMBOL(acpi_ut_value_exit);
+ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
/*******************************************************************************
*
@@ -493,7 +487,6 @@ EXPORT_SYMBOL(acpi_ut_value_exit);
* set in debug_level. Prints exit value also.
*
******************************************************************************/
-
void
acpi_ut_ptr_exit(u32 line_number,
const char *function_name,
@@ -524,20 +517,13 @@ acpi_ut_ptr_exit(u32 line_number,
*
******************************************************************************/
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
{
acpi_native_uint i = 0;
acpi_native_uint j;
u32 temp32;
u8 buf_char;
- /* Only dump the buffer if tracing is enabled */
-
- if (!((ACPI_LV_TABLES & acpi_dbg_level) &&
- (component_id & acpi_dbg_layer))) {
- return;
- }
-
if ((count < 4) || (count & 0x01)) {
display = DB_BYTE_DISPLAY;
}
@@ -545,6 +531,7 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
/* Nasty little dump buffer routine! */
while (i < count) {
+
/* Print current offset */
acpi_os_printf("%6.4X: ", (u32) i);
@@ -553,6 +540,7 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
for (j = 0; j < 16;) {
if (i + j >= count) {
+
/* Dump fill spaces */
acpi_os_printf("%*s", ((display * 2) + 1), " ");
@@ -561,6 +549,7 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
}
switch (display) {
+ case DB_BYTE_DISPLAY:
default: /* Default is BYTE display */
acpi_os_printf("%02X ", buffer[i + j]);
@@ -618,3 +607,31 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
return;
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_buffer
+ *
+ * PARAMETERS: Buffer - Buffer to dump
+ * Count - Amount to dump, in bytes
+ * Display - BYTE, WORD, DWORD, or QWORD display
+ * component_iD - Caller's component ID
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ascii.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+{
+
+ /* Only dump the buffer if tracing is enabled */
+
+ if (!((ACPI_LV_TABLES & acpi_dbg_level) &&
+ (component_id & acpi_dbg_layer))) {
+ return;
+ }
+
+ acpi_ut_dump_buffer2(buffer, count, display);
+}
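
The utdebug.c change above splits the hex dump in two: acpi_ut_dump_buffer2() always prints, while acpi_ut_dump_buffer() keeps the old signature and only forwards when ACPI_LV_TABLES tracing is enabled for the caller's component. A standalone sketch of that gate-then-delegate shape, with placeholder flag names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: unconditional worker that dumps bytes in hex... */
static void dump_buffer_always(const uint8_t *buf, uint32_t count)
{
	for (uint32_t i = 0; i < count; i++)
		printf("%02X%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	putchar('\n');
}

/* ...and a thin wrapper that applies the old level/layer filter first. */
static void dump_buffer_filtered(const uint8_t *buf, uint32_t count,
				 uint32_t dbg_level, uint32_t dbg_layer,
				 uint32_t wanted_level, uint32_t component_id)
{
	if (!((wanted_level & dbg_level) && (component_id & dbg_layer)))
		return;		/* tracing disabled for this component */

	dump_buffer_always(buf, count);
}
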
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 1db9695b0029..67b9f325c6fa 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -76,7 +76,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
union acpi_operand_object *second_desc;
union acpi_operand_object *next_desc;
- ACPI_FUNCTION_TRACE_PTR("ut_delete_internal_obj", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
if (!object) {
return_VOID;
@@ -96,6 +96,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
/* Free the actual string buffer */
if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
/* But only if it is NOT a pointer into an ACPI table */
obj_pointer = object->string.pointer;
@@ -111,6 +112,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
/* Free the actual buffer */
if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
/* But only if it is NOT a pointer into an ACPI table */
obj_pointer = object->buffer.pointer;
@@ -198,11 +200,22 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
*/
handler_desc = object->region.handler;
if (handler_desc) {
- if (handler_desc->address_space.
- hflags &
+ if (handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
- obj_pointer =
- second_desc->extra.region_context;
+
+ /* Deactivate region and free region context */
+
+ if (handler_desc->address_space.setup) {
+ (void)handler_desc->
+ address_space.setup(object,
+ ACPI_REGION_DEACTIVATE,
+ handler_desc->
+ address_space.
+ context,
+ &second_desc->
+ extra.
+ region_context);
+ }
}
acpi_ut_remove_reference(handler_desc);
@@ -234,7 +247,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
if (obj_pointer) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Deleting Object Subptr %p\n", obj_pointer));
- ACPI_MEM_FREE(obj_pointer);
+ ACPI_FREE(obj_pointer);
}
/* Now the object can be safely deleted */
@@ -263,7 +276,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
{
union acpi_operand_object **internal_obj;
- ACPI_FUNCTION_TRACE("ut_delete_internal_object_list");
+ ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
/* Walk the null-terminated internal list */
@@ -273,7 +286,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
/* Free the combined parameter pointer list and object array */
- ACPI_MEM_FREE(obj_list);
+ ACPI_FREE(obj_list);
return_VOID;
}
@@ -296,7 +309,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
u16 count;
u16 new_count;
- ACPI_FUNCTION_NAME("ut_update_ref_count");
+ ACPI_FUNCTION_NAME(ut_update_ref_count);
if (!object) {
return;
@@ -306,11 +319,9 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
new_count = count;
/*
- * Perform the reference count action
- * (increment, decrement, or force delete)
+ * Perform the reference count action (increment, decrement, force delete)
*/
switch (action) {
-
case REF_INCREMENT:
new_count++;
@@ -347,7 +358,6 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
if (new_count == 0) {
acpi_ut_delete_internal_obj(object);
}
-
break;
case REF_FORCE_DELETE:
@@ -372,13 +382,10 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
* (A deleted object will have a huge reference count)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
-
ACPI_WARNING((AE_INFO,
- "Large Reference Count (%X) in object %p",
- count, object));
+ "Large Reference Count (%X) in object %p", count,
+ object));
}
-
- return;
}
/*******************************************************************************
@@ -404,7 +411,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
******************************************************************************/
acpi_status
-acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
+acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
{
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
@@ -412,9 +419,10 @@ acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
union acpi_generic_state *state;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE_PTR("ut_update_object_reference", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
while (object) {
+
/* Make sure that this isn't a namespace handle */
if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
@@ -507,11 +515,11 @@ acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
case ACPI_TYPE_REGION:
default:
- break; /* No subobjects */
+ break; /* No subobjects for all other types */
}
/*
- * Now we can update the count in the main object. This can only
+ * Now we can update the count in the main object. This can only
* happen after we update the sub-objects in case this causes the
* main object to be deleted.
*/
@@ -556,7 +564,7 @@ acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
void acpi_ut_add_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR("ut_add_reference", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
/* Ensure that we have a valid object */
@@ -589,11 +597,11 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
void acpi_ut_remove_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR("ut_remove_reference", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
/*
- * Allow a NULL pointer to be passed in, just ignore it. This saves
- * each caller from having to check. Also, ignore NS nodes.
+ * Allow a NULL pointer to be passed in, just ignore it. This saves
+ * each caller from having to check. Also, ignore NS nodes.
*
*/
if (!object ||
@@ -613,7 +621,7 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
/*
* Decrement the reference count, and only actually delete the object
- * if the reference count becomes 0. (Must also decrement the ref count
+ * if the reference count becomes 0. (Must also decrement the ref count
* of all subobjects!)
*/
(void)acpi_ut_update_object_reference(object, REF_DECREMENT);
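
The main behavioral change in utdelete.c above is region teardown: instead of merely picking up the saved region_context, the object's address-space handler setup routine is now invoked with ACPI_REGION_DEACTIVATE so the region is deactivated and its context released before the handler reference is dropped. A standalone sketch of that guarded-callback shape, with invented names standing in for the ACPICA types:

#include <stddef.h>

#define HANDLER_DEFAULT_INSTALLED 0x1
#define REGION_DEACTIVATE 0

struct handler {
	void (*setup)(void *obj, int action, void *ctx, void **region_ctx);
	void *context;
	unsigned int flags;
};

/* Run the handler's teardown callback, if any, before the handler
 * itself is released, as the hunk above now does for regions. */
static void delete_region(void *object, struct handler *h, void **region_ctx)
{
	if (h && (h->flags & HANDLER_DEFAULT_INSTALLED) && h->setup)
		h->setup(object, REGION_DEACTIVATE, h->context, region_ctx);
	/* the reference on h would be dropped here */
}
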
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index 106cc97cb4af..d6d7121583c0 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -56,6 +56,34 @@ static acpi_status
acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
struct acpi_compatible_id *one_cid);
+/*
+ * Strings supported by the _OSI predefined (internal) method.
+ */
+static const char *acpi_interfaces_supported[] = {
+ /* Operating System Vendor Strings */
+
+ "Linux",
+ "Windows 2000",
+ "Windows 2001",
+ "Windows 2001 SP0",
+ "Windows 2001 SP1",
+ "Windows 2001 SP2",
+ "Windows 2001 SP3",
+ "Windows 2001 SP4",
+ "Windows 2001.1",
+ "Windows 2001.1 SP1", /* Added 03/2006 */
+ "Windows 2006", /* Added 03/2006 */
+
+ /* Feature Group Strings */
+
+ "Extended Address Space Descriptor"
+ /*
+ * All "optional" feature group strings (features that are implemented
+ * by the host) should be implemented in the host version of
+ * acpi_os_validate_interface and should not be added here.
+ */
+};
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_osi_implementation
@@ -64,18 +92,18 @@ acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
*
* RETURN: Status
*
- * DESCRIPTION: Implementation of _OSI predefined control method
- * Supported = _OSI (String)
+ * DESCRIPTION: Implementation of the _OSI predefined control method
*
******************************************************************************/
acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
{
+ acpi_status status;
union acpi_operand_object *string_desc;
union acpi_operand_object *return_desc;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ut_osi_implementation");
+ ACPI_FUNCTION_TRACE(ut_osi_implementation);
/* Validate the string input argument */
@@ -84,28 +112,47 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
return_ACPI_STATUS(AE_TYPE);
}
- /* Create a return object (Default value = 0) */
+ /* Create a return object */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
- /* Compare input string to table of supported strings */
+ /* Default return value is SUPPORTED */
+
+ return_desc->integer.value = ACPI_UINT32_MAX;
+ walk_state->return_desc = return_desc;
+
+ /* Compare input string to static table of supported interfaces */
- for (i = 0; i < ACPI_NUM_OSI_STRINGS; i++) {
- if (!ACPI_STRCMP(string_desc->string.pointer,
- ACPI_CAST_PTR(char,
- acpi_gbl_valid_osi_strings[i])))
- {
- /* This string is supported */
+ for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
+ if (!ACPI_STRCMP
+ (string_desc->string.pointer,
+ acpi_interfaces_supported[i])) {
- return_desc->integer.value = 0xFFFFFFFF;
- break;
+ /* The interface is supported */
+
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
}
}
- walk_state->return_desc = return_desc;
+ /*
+ * Did not match the string in the static table, call the host OSL to
+ * check for a match with one of the optional strings (such as
+ * "Module Device", "3.0 Thermal Model", etc.)
+ */
+ status = acpi_os_validate_interface(string_desc->string.pointer);
+ if (ACPI_SUCCESS(status)) {
+
+ /* The interface is supported */
+
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+
+ /* The interface is not supported */
+
+ return_desc->integer.value = 0;
return_ACPI_STATUS(AE_CTRL_TERMINATE);
}
@@ -134,19 +181,26 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
{
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
acpi_status status;
u32 return_btype;
- ACPI_FUNCTION_TRACE("ut_evaluate_object");
+ ACPI_FUNCTION_TRACE(ut_evaluate_object);
- info.node = prefix_node;
- info.parameters = NULL;
- info.parameter_type = ACPI_PARAM_ARGS;
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = prefix_node;
+ info->pathname = path;
+ info->parameter_type = ACPI_PARAM_ARGS;
/* Evaluate the object/method */
- status = acpi_ns_evaluate_relative(path, &info);
+ status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -158,25 +212,25 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
prefix_node, path, status);
}
- return_ACPI_STATUS(status);
+ goto cleanup;
}
/* Did we get a return object? */
- if (!info.return_object) {
+ if (!info->return_object) {
if (expected_return_btypes) {
ACPI_ERROR_METHOD("No object was returned from",
prefix_node, path, AE_NOT_EXIST);
- return_ACPI_STATUS(AE_NOT_EXIST);
+ status = AE_NOT_EXIST;
}
- return_ACPI_STATUS(AE_OK);
+ goto cleanup;
}
/* Map the return object type to the bitmapped type */
- switch (ACPI_GET_OBJECT_TYPE(info.return_object)) {
+ switch (ACPI_GET_OBJECT_TYPE(info->return_object)) {
case ACPI_TYPE_INTEGER:
return_btype = ACPI_BTYPE_INTEGER;
break;
@@ -204,8 +258,8 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
* happen frequently if the "implicit return" feature is enabled.
* Just delete the return object and return AE_OK.
*/
- acpi_ut_remove_reference(info.return_object);
- return_ACPI_STATUS(AE_OK);
+ acpi_ut_remove_reference(info->return_object);
+ goto cleanup;
}
/* Is the return object one of the expected types? */
@@ -217,19 +271,23 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
ACPI_ERROR((AE_INFO,
"Type returned from %s was incorrect: %s, expected Btypes: %X",
path,
- acpi_ut_get_object_type_name(info.return_object),
+ acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));
/* On error exit, we must delete the return object */
- acpi_ut_remove_reference(info.return_object);
- return_ACPI_STATUS(AE_TYPE);
+ acpi_ut_remove_reference(info->return_object);
+ status = AE_TYPE;
+ goto cleanup;
}
/* Object type is OK, return it */
- *return_desc = info.return_object;
- return_ACPI_STATUS(AE_OK);
+ *return_desc = info->return_object;
+
+ cleanup:
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -257,7 +315,7 @@ acpi_ut_evaluate_numeric_object(char *object_name,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_evaluate_numeric_object");
+ ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object);
status = acpi_ut_evaluate_object(device_node, object_name,
ACPI_BTYPE_INTEGER, &obj_desc);
@@ -333,7 +391,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_execute_HID");
+ ACPI_FUNCTION_TRACE(ut_execute_HID);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
@@ -343,6 +401,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
}
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Convert the Numeric HID to string */
acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
@@ -436,7 +495,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
struct acpi_compatible_id_list *cid_list;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ut_execute_CID");
+ ACPI_FUNCTION_TRACE(ut_execute_CID);
/* Evaluate the _CID method for this device */
@@ -459,7 +518,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
size = (((count - 1) * sizeof(struct acpi_compatible_id)) +
sizeof(struct acpi_compatible_id_list));
- cid_list = ACPI_MEM_CALLOCATE((acpi_size) size);
+ cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
if (!cid_list) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -479,6 +538,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
/* The _CID object can be either a single CID or a package (list) of CIDs */
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
/* Translate each package element */
for (i = 0; i < count; i++) {
@@ -499,7 +559,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
/* Cleanup on error */
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(cid_list);
+ ACPI_FREE(cid_list);
} else {
*return_cid_list = cid_list;
}
@@ -533,7 +593,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_execute_UID");
+ ACPI_FUNCTION_TRACE(ut_execute_UID);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
@@ -543,6 +603,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
}
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Convert the Numeric UID to string */
acpi_ex_unsigned_integer_to_string(obj_desc->integer.value,
@@ -582,7 +643,7 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_execute_STA");
+ ACPI_FUNCTION_TRACE(ut_execute_STA);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA,
ACPI_BTYPE_INTEGER, &obj_desc);
@@ -632,7 +693,7 @@ acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest)
acpi_status status;
u32 i;
- ACPI_FUNCTION_TRACE("ut_execute_Sxds");
+ ACPI_FUNCTION_TRACE(ut_execute_sxds);
for (i = 0; i < 4; i++) {
highest[i] = 0xFF;
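
The _OSI rework in uteval.c above gives the method a three-step flow: the return object defaults to "supported" (ACPI_UINT32_MAX), the argument is compared against the static acpi_interfaces_supported[] table, and only on a miss is the host OSL consulted through acpi_os_validate_interface(); if both miss, the value is set to 0. The same file also switches acpi_ut_evaluate_object() to a heap-allocated acpi_evaluate_info block with a common cleanup exit. The standalone sketch below mirrors just the lookup order, with placeholder names and a stub in place of the OSL call:

#include <stdint.h>
#include <string.h>

#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

static const char *supported_interfaces[] = {
	"Linux",
	"Windows 2001",
	"Extended Address Space Descriptor",
};

/* Stand-in for the host OSL hook (acpi_os_validate_interface); here it
 * recognizes nothing extra. */
static int host_validates(const char *name)
{
	(void)name;
	return 0;
}

/* Static table first, host OSL second; all-ones means "supported". */
static uint32_t osi_query(const char *name)
{
	for (size_t i = 0; i < NUM_ELEMENTS(supported_interfaces); i++)
		if (strcmp(name, supported_interfaces[i]) == 0)
			return UINT32_MAX;

	if (host_validates(name))
		return UINT32_MAX;

	return 0;
}
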
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index ffd13383a325..e5999c65c0b8 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -43,7 +43,6 @@
#define DEFINE_ACPI_GLOBALS
-#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@@ -119,6 +118,7 @@ const char *acpi_format_exception(acpi_status status)
}
if (!exception) {
+
/* Exception code was not recognized */
ACPI_ERROR((AE_INFO,
@@ -143,12 +143,10 @@ const char *acpi_format_exception(acpi_status status)
/* Debug switch - level and trace mask */
u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
-EXPORT_SYMBOL(acpi_dbg_level);
/* Debug switch - layer (component) mask */
u32 acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS;
-EXPORT_SYMBOL(acpi_dbg_layer);
u32 acpi_gbl_nesting_level = 0;
/* Debugger globals */
@@ -183,28 +181,6 @@ const char *acpi_gbl_highest_dstate_names[4] = {
"_S4D"
};
-/*
- * Strings supported by the _OSI predefined (internal) method.
- * When adding strings, be sure to update ACPI_NUM_OSI_STRINGS.
- */
-const char *acpi_gbl_valid_osi_strings[ACPI_NUM_OSI_STRINGS] = {
- /* Operating System Vendor Strings */
-
- "Linux",
- "Windows 2000",
- "Windows 2001",
- "Windows 2001.1",
- "Windows 2001 SP0",
- "Windows 2001 SP1",
- "Windows 2001 SP2",
- "Windows 2001 SP3",
- "Windows 2001 SP4",
-
- /* Feature Group Strings */
-
- "Extended Address Space Descriptor"
-};
-
/*******************************************************************************
*
* Namespace globals
@@ -317,9 +293,9 @@ char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
*
******************************************************************************/
-struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES];
+struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
-struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES] = {
+struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1] = {
/*********** Name, Signature, Global typed pointer Signature size, Type How many allowed?, Contains valid AML? */
/* RSDP 0 */ {RSDP_NAME, RSDP_SIG, NULL, sizeof(RSDP_SIG) - 1,
@@ -467,7 +443,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
/* Region type decoding */
const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
-/*! [Begin] no source code translation (keep these ASL Keywords as-is) */
"SystemMemory",
"SystemIO",
"PCI_Config",
@@ -476,16 +451,15 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"CMOS",
"PCIBARTarget",
"DataTable"
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_region_name(u8 space_id)
{
if (space_id >= ACPI_USER_REGION_BEGIN) {
- return ("user_defined_region");
+ return ("UserDefinedRegion");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
- return ("invalid_space_id");
+ return ("InvalidSpaceId");
}
return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
@@ -506,20 +480,18 @@ char *acpi_ut_get_region_name(u8 space_id)
/* Event type decoding */
static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
-/*! [Begin] no source code translation (keep these strings as-is) */
"PM_Timer",
"GlobalLock",
"PowerButton",
"SleepButton",
"RealTimeClock",
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_event_name(u32 event_id)
{
if (event_id > ACPI_EVENT_MAX) {
- return ("invalid_event_iD");
+ return ("InvalidEventID");
}
return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
@@ -550,7 +522,6 @@ static const char acpi_gbl_bad_type[] = "UNDEFINED";
/* Printable names of the ACPI object types */
static const char *acpi_gbl_ns_type_names[] = {
-/*! [Begin] no source code translation (keep these strings as-is) */
/* 00 */ "Untyped",
/* 01 */ "Integer",
/* 02 */ "String",
@@ -582,7 +553,6 @@ static const char *acpi_gbl_ns_type_names[] = {
/* 28 */ "Extra",
/* 29 */ "Data",
/* 30 */ "Invalid"
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_type_name(acpi_object_type type)
@@ -635,14 +605,14 @@ char *acpi_ut_get_node_name(void *object)
/* Descriptor must be a namespace node */
- if (node->descriptor != ACPI_DESC_TYPE_NAMED) {
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
return ("####");
}
/* Name must be a valid ACPI name */
if (!acpi_ut_valid_acpi_name(node->name.integer)) {
- return ("????");
+ node->name.integer = acpi_ut_repair_name(node->name.integer);
}
/* Return the name */
@@ -665,7 +635,6 @@ char *acpi_ut_get_node_name(void *object)
/* Printable names of object descriptor types */
static const char *acpi_gbl_desc_type_names[] = {
-/*! [Begin] no source code translation (keep these ASL Keywords as-is) */
/* 00 */ "Invalid",
/* 01 */ "Cached",
/* 02 */ "State-Generic",
@@ -682,7 +651,6 @@ static const char *acpi_gbl_desc_type_names[] = {
/* 13 */ "Parser",
/* 14 */ "Operand",
/* 15 */ "Node"
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_descriptor_name(void *object)
@@ -723,7 +691,7 @@ char *acpi_ut_get_descriptor_name(void *object)
char *acpi_ut_get_mutex_name(u32 mutex_id)
{
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return ("Invalid Mutex ID");
}
@@ -747,6 +715,7 @@ u8 acpi_ut_valid_object_type(acpi_object_type type)
{
if (type > ACPI_TYPE_LOCAL_MAX) {
+
/* Note: Assumes all TYPEs are contiguous (external/local) */
return (FALSE);
@@ -773,7 +742,7 @@ void acpi_ut_init_globals(void)
acpi_status status;
u32 i;
- ACPI_FUNCTION_TRACE("ut_init_globals");
+ ACPI_FUNCTION_TRACE(ut_init_globals);
/* Create all memory caches */
@@ -784,14 +753,14 @@ void acpi_ut_init_globals(void)
/* ACPI table structure */
- for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) {
+ for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
acpi_gbl_table_lists[i].next = NULL;
acpi_gbl_table_lists[i].count = 0;
}
/* Mutex locked flags */
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
acpi_gbl_mutex_info[i].mutex = NULL;
acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
acpi_gbl_mutex_info[i].use_count = 0;
@@ -856,7 +825,7 @@ void acpi_ut_init_globals(void)
acpi_gbl_root_node = NULL;
acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
- acpi_gbl_root_node_struct.descriptor = ACPI_DESC_TYPE_NAMED;
+ acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
acpi_gbl_root_node_struct.child = NULL;
acpi_gbl_root_node_struct.peer = NULL;
@@ -869,3 +838,6 @@ void acpi_ut_init_globals(void)
return_VOID;
}
+
+ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
index ba771b4f39bc..ff76055eb7d6 100644
--- a/drivers/acpi/utilities/utinit.c
+++ b/drivers/acpi/utilities/utinit.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("utinit")
/* Local prototypes */
static void
-acpi_ut_fadt_register_error(char *register_name, u32 value, acpi_size offset);
+acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset);
static void acpi_ut_terminate(void);
@@ -69,12 +69,12 @@ static void acpi_ut_terminate(void);
******************************************************************************/
static void
-acpi_ut_fadt_register_error(char *register_name, u32 value, acpi_size offset)
+acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset)
{
ACPI_WARNING((AE_INFO,
"Invalid FADT value %s=%X at offset %X FADT=%p",
- register_name, value, (u32) offset, acpi_gbl_FADT));
+ register_name, value, offset, acpi_gbl_FADT));
}
/******************************************************************************
@@ -176,7 +176,7 @@ static void acpi_ut_terminate(void)
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
- ACPI_FUNCTION_TRACE("ut_terminate");
+ ACPI_FUNCTION_TRACE(ut_terminate);
/* Free global tables, etc. */
/* Free global GPE blocks and related info structures */
@@ -186,14 +186,14 @@ static void acpi_ut_terminate(void)
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
next_gpe_block = gpe_block->next;
- ACPI_MEM_FREE(gpe_block->event_info);
- ACPI_MEM_FREE(gpe_block->register_info);
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block->event_info);
+ ACPI_FREE(gpe_block->register_info);
+ ACPI_FREE(gpe_block);
gpe_block = next_gpe_block;
}
next_gpe_xrupt_info = gpe_xrupt_info->next;
- ACPI_MEM_FREE(gpe_xrupt_info);
+ ACPI_FREE(gpe_xrupt_info);
gpe_xrupt_info = next_gpe_xrupt_info;
}
@@ -216,7 +216,7 @@ static void acpi_ut_terminate(void)
void acpi_ut_subsystem_shutdown(void)
{
- ACPI_FUNCTION_TRACE("ut_subsystem_shutdown");
+ ACPI_FUNCTION_TRACE(ut_subsystem_shutdown);
/* Just exit if subsystem is already shutdown */
@@ -228,6 +228,7 @@ void acpi_ut_subsystem_shutdown(void)
/* Subsystem appears active, go ahead and shut it down */
acpi_gbl_shutdown = TRUE;
+ acpi_gbl_startup_flags = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
/* Close the acpi_event Handling */
@@ -245,12 +246,5 @@ void acpi_ut_subsystem_shutdown(void)
/* Purge the local caches */
(void)acpi_ut_delete_caches();
-
- /* Debug only - display leftover memory allocation, if any */
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
- acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
-#endif
-
return_VOID;
}
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
index 4a3360484e72..19d74bedce27 100644
--- a/drivers/acpi/utilities/utmath.c
+++ b/drivers/acpi/utilities/utmath.c
@@ -77,7 +77,7 @@ acpi_ut_short_divide(acpi_integer dividend,
union uint64_overlay quotient;
u32 remainder32;
- ACPI_FUNCTION_TRACE("ut_short_divide");
+ ACPI_FUNCTION_TRACE(ut_short_divide);
/* Always check for a zero divisor */
@@ -139,7 +139,7 @@ acpi_ut_divide(acpi_integer in_dividend,
union uint64_overlay partial2;
union uint64_overlay partial3;
- ACPI_FUNCTION_TRACE("ut_divide");
+ ACPI_FUNCTION_TRACE(ut_divide);
/* Always check for a zero divisor */
@@ -261,7 +261,7 @@ acpi_ut_short_divide(acpi_integer in_dividend,
acpi_integer * out_quotient, u32 * out_remainder)
{
- ACPI_FUNCTION_TRACE("ut_short_divide");
+ ACPI_FUNCTION_TRACE(ut_short_divide);
/* Always check for a zero divisor */
@@ -287,7 +287,7 @@ acpi_ut_divide(acpi_integer in_dividend,
acpi_integer in_divisor,
acpi_integer * out_quotient, acpi_integer * out_remainder)
{
- ACPI_FUNCTION_TRACE("ut_divide");
+ ACPI_FUNCTION_TRACE(ut_divide);
/* Always check for a zero divisor */
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 7364f5f8c9cd..5c75d35ad1cd 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -49,6 +49,33 @@ ACPI_MODULE_NAME("utmisc")
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_is_aml_table
+ *
+ * PARAMETERS: Table - An ACPI table
+ *
+ * RETURN: TRUE if table contains executable AML; FALSE otherwise
+ *
+ * DESCRIPTION: Check ACPI Signature for a table that contains AML code.
+ * Currently, these are DSDT, SSDT, PSDT. All other table types are
+ * data tables that do not contain AML code.
+ *
+ ******************************************************************************/
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
+{
+
+ /* These are the only table signatures that contain executable AML */
+
+ if (ACPI_COMPARE_NAME(table->signature, DSDT_SIG) ||
+ ACPI_COMPARE_NAME(table->signature, PSDT_SIG) ||
+ ACPI_COMPARE_NAME(table->signature, SSDT_SIG)) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
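For illustration, a stand-alone mirror of the signature test introduced above (plain C using memcmp(); the kernel code itself relies on the ACPI_COMPARE_NAME macro): an ACPI table carries executable AML only when its 4-byte signature is DSDT, SSDT or PSDT.

#include <string.h>

static int is_aml_signature(const char sig[4])
{
	/* Only DSDT, SSDT and PSDT contain executable AML */
	return memcmp(sig, "DSDT", 4) == 0 ||
	       memcmp(sig, "SSDT", 4) == 0 ||
	       memcmp(sig, "PSDT", 4) == 0;
}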
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_allocate_owner_id
*
* PARAMETERS: owner_id - Where the new owner ID is returned
@@ -60,6 +87,7 @@ ACPI_MODULE_NAME("utmisc")
* when the method exits or the table is unloaded.
*
******************************************************************************/
+
acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
{
acpi_native_uint i;
@@ -67,7 +95,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
acpi_native_uint k;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_allocate_owner_id");
+ ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
/* Guard against multiple allocations of ID to the same location */
@@ -97,6 +125,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
+
/* There are no free IDs in this mask */
break;
@@ -123,7 +152,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
(acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
- "Allocated owner_id: %2.2X\n",
+ "Allocated OwnerId: %2.2X\n",
(unsigned int)*owner_id));
goto exit;
}
@@ -144,7 +173,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
*/
status = AE_OWNER_ID_LIMIT;
ACPI_ERROR((AE_INFO,
- "Could not allocate new owner_id (255 max), AE_OWNER_ID_LIMIT"));
+ "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
@@ -172,7 +201,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_native_uint index;
u32 bit;
- ACPI_FUNCTION_TRACE_U32("ut_release_owner_id", owner_id);
+ ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
/* Always clear the input owner_id (zero is an invalid ID) */
@@ -181,7 +210,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
/* Zero is not a valid owner_id */
if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid owner_id: %2.2X", owner_id));
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
return_VOID;
}
@@ -207,7 +236,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
- "Release of non-allocated owner_id: %2.2X",
+ "Release of non-allocated OwnerId: %2.2X",
owner_id + 1));
}
@@ -273,6 +302,7 @@ void acpi_ut_print_string(char *string, u8 max_length)
acpi_os_printf("\"");
for (i = 0; string[i] && (i < max_length); i++) {
+
/* Escape sequences */
switch (string[i]) {
@@ -461,12 +491,47 @@ acpi_ut_display_init_pathname(u8 type,
}
acpi_os_printf("\n");
- ACPI_MEM_FREE(buffer.pointer);
+ ACPI_FREE(buffer.pointer);
}
#endif
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_valid_acpi_char
+ *
+ * PARAMETERS: Char - The character to be examined
+ *
+ * RETURN: TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ * We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position)
+{
+
+ if (!((character >= 'A' && character <= 'Z') ||
+ (character >= '0' && character <= '9') || (character == '_'))) {
+
+ /* Allow a '!' in the last position */
+
+ if (character == '!' && position == 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
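A stand-alone sketch of the character rules documented above, mirroring the logic rather than calling the kernel helper: upper-case alpha, digit or underscore anywhere, with '!' tolerated only in the final (fourth) position to cover the ASF! signature.

static int valid_acpi_char(char c, unsigned int position)
{
	if ((c >= 'A' && c <= 'Z') ||
	    (c >= '0' && c <= '9') || c == '_')
		return 1;

	/* '!' is accepted only as the 4th character (ASF! table) */
	return (c == '!' && position == 3);
}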
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_valid_acpi_name
*
* PARAMETERS: Name - The name to be examined
@@ -482,19 +547,13 @@ acpi_ut_display_init_pathname(u8 type,
u8 acpi_ut_valid_acpi_name(u32 name)
{
- char *name_ptr = (char *)&name;
- char character;
acpi_native_uint i;
ACPI_FUNCTION_ENTRY();
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- character = *name_ptr;
- name_ptr++;
-
- if (!((character == '_') ||
- (character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9'))) {
+ if (!acpi_ut_valid_acpi_char
+ ((ACPI_CAST_PTR(char, &name))[i], i)) {
return (FALSE);
}
}
@@ -504,24 +563,37 @@ u8 acpi_ut_valid_acpi_name(u32 name)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_valid_acpi_character
+ * FUNCTION: acpi_ut_repair_name
*
- * PARAMETERS: Character - The character to be examined
+ * PARAMETERS: Name - The ACPI name to be repaired
*
- * RETURN: 1 if Character may appear in a name, else 0
+ * RETURN: Repaired version of the name
*
- * DESCRIPTION: Check for a printable character
+ * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
+ * return the new name.
*
******************************************************************************/
-u8 acpi_ut_valid_acpi_character(char character)
+acpi_name acpi_ut_repair_name(acpi_name name)
{
+ char *name_ptr = ACPI_CAST_PTR(char, &name);
+ char new_name[ACPI_NAME_SIZE];
+ acpi_native_uint i;
- ACPI_FUNCTION_ENTRY();
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ new_name[i] = name_ptr[i];
- return ((u8) ((character == '_') ||
- (character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9')));
+ /*
+ * Replace a bad character with something printable, yet technically
+ * still invalid. This prevents any collisions with existing "good"
+ * names in the namespace.
+ */
+ if (!acpi_ut_valid_acpi_char(name_ptr[i], i)) {
+ new_name[i] = '*';
+ }
+ }
+
+ return (*ACPI_CAST_PTR(u32, new_name));
}
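As a worked example of the repair behaviour, using the valid_acpi_char() mirror sketched earlier and a hypothetical bad name "AB?D": every byte that fails the check is overwritten with '*', so the name is stored back as "AB*D" rather than being rejected.

char name[4] = { 'A', 'B', '?', 'D' };	/* hypothetical invalid name */
unsigned int i;

for (i = 0; i < 4; i++) {
	if (!valid_acpi_char(name[i], i))
		name[i] = '*';		/* result: "AB*D" */
}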
/*******************************************************************************
@@ -529,7 +601,8 @@ u8 acpi_ut_valid_acpi_character(char character)
* FUNCTION: acpi_ut_strtoul64
*
* PARAMETERS: String - Null terminated string
- * Base - Radix of the string: 10, 16, or ACPI_ANY_BASE
+ * Base - Radix of the string: 16 or ACPI_ANY_BASE;
+ * ACPI_ANY_BASE means the conversion is on behalf of to_integer
* ret_integer - Where the converted integer is returned
*
* RETURN: Status and Converted value
@@ -545,16 +618,17 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
u32 this_digit = 0;
acpi_integer return_value = 0;
acpi_integer quotient;
+ acpi_integer dividend;
+ u32 to_integer_op = (base == ACPI_ANY_BASE);
+ u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+ u8 valid_digits = 0;
+ u8 sign_of0x = 0;
+ u8 term = 0;
- ACPI_FUNCTION_TRACE("ut_stroul64");
-
- if ((!string) || !(*string)) {
- goto error_exit;
- }
+ ACPI_FUNCTION_TRACE(ut_strtoul64);
switch (base) {
case ACPI_ANY_BASE:
- case 10:
case 16:
break;
@@ -563,76 +637,110 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ if (!string) {
+ goto error_exit;
+ }
+
/* Skip over any white space in the buffer */
- while (ACPI_IS_SPACE(*string) || *string == '\t') {
+ while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
string++;
}
- /*
- * If the input parameter Base is zero, then we need to
- * determine if it is decimal or hexadecimal:
- */
- if (base == 0) {
+ if (to_integer_op) {
+ /*
+ * Base equal to ACPI_ANY_BASE means 'to_integer operation case'.
+ * We need to determine if it is decimal or hexadecimal.
+ */
if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+ sign_of0x = 1;
base = 16;
+
+ /* Skip over the leading '0x' */
string += 2;
} else {
base = 10;
}
}
- /*
- * For hexadecimal base, skip over the leading
- * 0 or 0x, if they are present.
- */
- if ((base == 16) &&
- (*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
- string += 2;
+ /* Any string left? Check that '0x' is not followed by white space. */
+
+ if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ goto all_done;
+ }
}
- /* Any string left? */
+ dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
- if (!(*string)) {
- goto error_exit;
- }
+ /* At least one character in the string here */
/* Main loop: convert the string to a 64-bit integer */
while (*string) {
if (ACPI_IS_DIGIT(*string)) {
+
/* Convert ASCII 0-9 to Decimal value */
this_digit = ((u8) * string) - '0';
- } else {
- if (base == 10) {
- /* Digit is out of range */
+ } else if (base == 10) {
- goto error_exit;
- }
+ /* Digit is out of range; possible in to_integer case only */
+ term = 1;
+ } else {
this_digit = (u8) ACPI_TOUPPER(*string);
if (ACPI_IS_XDIGIT((char)this_digit)) {
+
/* Convert ASCII Hex char to value */
this_digit = this_digit - 'A' + 10;
} else {
- /*
- * We allow non-hex chars, just stop now, same as end-of-string.
- * See ACPI spec, string-to-integer conversion.
- */
+ term = 1;
+ }
+ }
+
+ if (term) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
break;
}
+ } else if ((valid_digits == 0) && (this_digit == 0)
+ && !sign_of0x) {
+
+ /* Skip zeros */
+ string++;
+ continue;
+ }
+
+ valid_digits++;
+
+ if (sign_of0x
+ && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
+ /*
+ * This is the to_integer operation case. Bail out when an explicit
+ * '0x' prefix is followed by more hex digits than fit in the
+ * current integer width.
+ */
+ goto error_exit;
}
/* Divide the digit into the correct position */
(void)
- acpi_ut_short_divide((ACPI_INTEGER_MAX -
- (acpi_integer) this_digit), base,
- &quotient, NULL);
+ acpi_ut_short_divide((dividend - (acpi_integer) this_digit),
+ base, &quotient, NULL);
+
if (return_value > quotient) {
- goto error_exit;
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
}
return_value *= base;
@@ -642,6 +750,8 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
/* All done, normal exit */
+ all_done:
+
*ret_integer = return_value;
return_ACPI_STATUS(AE_OK);
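A stand-alone sketch of the ACPI_ANY_BASE convention described above: a leading '0x' selects hexadecimal, anything else is parsed as decimal. The overflow guard and leading-zero handling of the kernel routine are deliberately omitted here.

#include <stdlib.h>

static unsigned long long any_base_convert(const char *s)
{
	/* '0x' or '0X' prefix selects base 16, otherwise base 10 */
	int base = (s[0] == '0' &&
		    (s[1] == 'x' || s[1] == 'X')) ? 16 : 10;

	return strtoull(s, NULL, base);
}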
@@ -719,7 +829,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
u32 this_index;
union acpi_operand_object *this_source_obj;
- ACPI_FUNCTION_TRACE("ut_walk_package_tree");
+ ACPI_FUNCTION_TRACE(ut_walk_package_tree);
state = acpi_ut_create_pkg_state(source_object, target_object, 0);
if (!state) {
@@ -727,6 +837,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
}
while (state) {
+
/* Get one element of the package */
this_index = state->pkg.index;
@@ -814,31 +925,6 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
/*******************************************************************************
*
- * FUNCTION: acpi_ut_generate_checksum
- *
- * PARAMETERS: Buffer - Buffer to be scanned
- * Length - number of bytes to examine
- *
- * RETURN: The generated checksum
- *
- * DESCRIPTION: Generate a checksum on a raw buffer
- *
- ******************************************************************************/
-
-u8 acpi_ut_generate_checksum(u8 * buffer, u32 length)
-{
- u32 i;
- signed char sum = 0;
-
- for (i = 0; i < length; i++) {
- sum = (signed char)(sum + buffer[i]);
- }
-
- return ((u8) (0 - sum));
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_error, acpi_ut_warning, acpi_ut_info
*
* PARAMETERS: module_name - Caller's module name (for error output)
@@ -900,36 +986,3 @@ acpi_ut_info(char *module_name, u32 line_number, char *format, ...)
acpi_os_vprintf(format, args);
acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_report_error, Warning, Info
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- *
- * RETURN: None
- *
- * DESCRIPTION: Print error message
- *
- * Note: Legacy only, should be removed when no longer used by drivers.
- *
- ******************************************************************************/
-
-void acpi_ut_report_error(char *module_name, u32 line_number)
-{
-
- acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-}
-
-void acpi_ut_report_warning(char *module_name, u32 line_number)
-{
-
- acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
-}
-
-void acpi_ut_report_info(char *module_name, u32 line_number)
-{
-
- acpi_os_printf("ACPI (%s-%04d): ", module_name, line_number);
-}
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index 45a7244df924..25eb34369afa 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -68,19 +68,26 @@ acpi_status acpi_ut_mutex_initialize(void)
u32 i;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_mutex_initialize");
+ ACPI_FUNCTION_TRACE(ut_mutex_initialize);
/*
* Create each of the predefined mutex objects
*/
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
status = acpi_ut_create_mutex(i);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
+ /* Create the spinlocks for use at interrupt level */
+
status = acpi_os_create_lock(&acpi_gbl_gpe_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_os_create_lock(&acpi_gbl_hardware_lock);
return_ACPI_STATUS(status);
}
@@ -100,16 +107,19 @@ void acpi_ut_mutex_terminate(void)
{
u32 i;
- ACPI_FUNCTION_TRACE("ut_mutex_terminate");
+ ACPI_FUNCTION_TRACE(ut_mutex_terminate);
/*
* Delete each predefined mutex object
*/
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
(void)acpi_ut_delete_mutex(i);
}
+ /* Delete the spinlocks */
+
acpi_os_delete_lock(acpi_gbl_gpe_lock);
+ acpi_os_delete_lock(acpi_gbl_hardware_lock);
return_VOID;
}
@@ -129,9 +139,9 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_U32("ut_create_mutex", mutex_id);
+ ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -163,9 +173,9 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status;
- ACPI_FUNCTION_TRACE_U32("ut_delete_mutex", mutex_id);
+ ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -192,11 +202,11 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status;
- u32 this_thread_id;
+ acpi_thread_id this_thread_id;
- ACPI_FUNCTION_NAME("ut_acquire_mutex");
+ ACPI_FUNCTION_NAME(ut_acquire_mutex);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
@@ -213,7 +223,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
* the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
- for (i = mutex_id; i < MAX_MUTEX; i++) {
+ for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
ACPI_ERROR((AE_INFO,
@@ -275,16 +285,16 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status;
- u32 this_thread_id;
+ acpi_thread_id this_thread_id;
- ACPI_FUNCTION_NAME("ut_release_mutex");
+ ACPI_FUNCTION_NAME(ut_release_mutex);
this_thread_id = acpi_os_get_thread_id();
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %X releasing Mutex [%s]\n", this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
@@ -309,7 +319,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
* ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
- for (i = mutex_id; i < MAX_MUTEX; i++) {
+ for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
continue;
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index 7ee2d1d98071..ba7d8ac702df 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -92,7 +92,7 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name,
union acpi_operand_object *object;
union acpi_operand_object *second_object;
- ACPI_FUNCTION_TRACE_STR("ut_create_internal_object_dbg",
+ ACPI_FUNCTION_TRACE_STR(ut_create_internal_object_dbg,
acpi_ut_get_type_name(type));
/* Allocate the raw object descriptor */
@@ -161,7 +161,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
union acpi_operand_object *buffer_desc;
u8 *buffer = NULL;
- ACPI_FUNCTION_TRACE_U32("ut_create_buffer_object", buffer_size);
+ ACPI_FUNCTION_TRACE_U32(ut_create_buffer_object, buffer_size);
/* Create a new Buffer object */
@@ -173,9 +173,10 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
/* Create an actual buffer only if size > 0 */
if (buffer_size > 0) {
+
/* Allocate the actual buffer */
- buffer = ACPI_MEM_CALLOCATE(buffer_size);
+ buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
if (!buffer) {
ACPI_ERROR((AE_INFO, "Could not allocate size %X",
(u32) buffer_size));
@@ -214,7 +215,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
union acpi_operand_object *string_desc;
char *string;
- ACPI_FUNCTION_TRACE_U32("ut_create_string_object", string_size);
+ ACPI_FUNCTION_TRACE_U32(ut_create_string_object, string_size);
/* Create a new String object */
@@ -227,7 +228,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
* Allocate the actual string buffer -- (Size + 1) for NULL terminator.
* NOTE: Zero-length strings are NULL terminated
*/
- string = ACPI_MEM_CALLOCATE(string_size + 1);
+ string = ACPI_ALLOCATE_ZEROED(string_size + 1);
if (!string) {
ACPI_ERROR((AE_INFO, "Could not allocate size %X",
(u32) string_size));
@@ -260,7 +261,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
u8 acpi_ut_valid_internal_object(void *object)
{
- ACPI_FUNCTION_NAME("ut_valid_internal_object");
+ ACPI_FUNCTION_NAME(ut_valid_internal_object);
/* Check for a null pointer */
@@ -308,7 +309,7 @@ void *acpi_ut_allocate_object_desc_dbg(char *module_name,
{
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ut_allocate_object_desc_dbg");
+ ACPI_FUNCTION_TRACE(ut_allocate_object_desc_dbg);
object = acpi_os_acquire_object(acpi_gbl_operand_cache);
if (!object) {
@@ -319,6 +320,7 @@ void *acpi_ut_allocate_object_desc_dbg(char *module_name,
}
/* Mark the descriptor type */
+
memset(object, 0, sizeof(union acpi_operand_object));
ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
@@ -342,7 +344,7 @@ void *acpi_ut_allocate_object_desc_dbg(char *module_name,
void acpi_ut_delete_object_desc(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR("ut_delete_object_desc", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
/* Object must be an union acpi_operand_object */
@@ -381,7 +383,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
acpi_size length;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ut_get_simple_object_size", internal_object);
+ ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
/*
* Handle a null object (Could be a uninitialized package
@@ -397,6 +399,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
length = sizeof(union acpi_object);
if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
+
/* Object is a named object (reference), just return the length */
*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
@@ -559,7 +562,7 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
acpi_status status;
struct acpi_pkg_info info;
- ACPI_FUNCTION_TRACE_PTR("ut_get_package_object_size", internal_object);
+ ACPI_FUNCTION_TRACE_PTR(ut_get_package_object_size, internal_object);
info.length = 0;
info.object_space = 0;
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index 16461317113f..5a2de92831d3 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -45,113 +45,113 @@
#include <acpi/amlresrc.h>
#define _COMPONENT ACPI_UTILITIES
-ACPI_MODULE_NAME("utmisc")
+ACPI_MODULE_NAME("utresrc")
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
* Used by both the disassembler and the debugger resource dump routines
*/
-const char *acpi_gbl_BMdecode[2] = {
- "not_bus_master",
- "bus_master"
+const char *acpi_gbl_bm_decode[] = {
+ "NotBusMaster",
+ "BusMaster"
};
-const char *acpi_gbl_config_decode[4] = {
+const char *acpi_gbl_config_decode[] = {
"0 - Good Configuration",
"1 - Acceptable Configuration",
"2 - Suboptimal Configuration",
"3 - ***Invalid Configuration***",
};
-const char *acpi_gbl_consume_decode[2] = {
- "resource_producer",
- "resource_consumer"
+const char *acpi_gbl_consume_decode[] = {
+ "ResourceProducer",
+ "ResourceConsumer"
};
-const char *acpi_gbl_DECdecode[2] = {
- "pos_decode",
- "sub_decode"
+const char *acpi_gbl_dec_decode[] = {
+ "PosDecode",
+ "SubDecode"
};
-const char *acpi_gbl_HEdecode[2] = {
+const char *acpi_gbl_he_decode[] = {
"Level",
"Edge"
};
-const char *acpi_gbl_io_decode[2] = {
+const char *acpi_gbl_io_decode[] = {
"Decode10",
"Decode16"
};
-const char *acpi_gbl_LLdecode[2] = {
- "active_high",
- "active_low"
+const char *acpi_gbl_ll_decode[] = {
+ "ActiveHigh",
+ "ActiveLow"
};
-const char *acpi_gbl_max_decode[2] = {
- "max_not_fixed",
- "max_fixed"
+const char *acpi_gbl_max_decode[] = {
+ "MaxNotFixed",
+ "MaxFixed"
};
-const char *acpi_gbl_MEMdecode[4] = {
- "non_cacheable",
+const char *acpi_gbl_mem_decode[] = {
+ "NonCacheable",
"Cacheable",
- "write_combining",
+ "WriteCombining",
"Prefetchable"
};
-const char *acpi_gbl_min_decode[2] = {
- "min_not_fixed",
- "min_fixed"
+const char *acpi_gbl_min_decode[] = {
+ "MinNotFixed",
+ "MinFixed"
};
-const char *acpi_gbl_MTPdecode[4] = {
- "address_range_memory",
- "address_range_reserved",
- "address_range_aCPI",
- "address_range_nVS"
+const char *acpi_gbl_mtp_decode[] = {
+ "AddressRangeMemory",
+ "AddressRangeReserved",
+ "AddressRangeACPI",
+ "AddressRangeNVS"
};
-const char *acpi_gbl_RNGdecode[4] = {
- "invalid_ranges",
- "non_iSAonly_ranges",
- "ISAonly_ranges",
- "entire_range"
+const char *acpi_gbl_rng_decode[] = {
+ "InvalidRanges",
+ "NonISAOnlyRanges",
+ "ISAOnlyRanges",
+ "EntireRange"
};
-const char *acpi_gbl_RWdecode[2] = {
- "read_only",
- "read_write"
+const char *acpi_gbl_rw_decode[] = {
+ "ReadOnly",
+ "ReadWrite"
};
-const char *acpi_gbl_SHRdecode[2] = {
+const char *acpi_gbl_shr_decode[] = {
"Exclusive",
"Shared"
};
-const char *acpi_gbl_SIZdecode[4] = {
+const char *acpi_gbl_siz_decode[] = {
"Transfer8",
"Transfer8_16",
"Transfer16",
- "invalid_size"
+ "InvalidSize"
};
-const char *acpi_gbl_TRSdecode[2] = {
- "dense_translation",
- "sparse_translation"
+const char *acpi_gbl_trs_decode[] = {
+ "DenseTranslation",
+ "SparseTranslation"
};
-const char *acpi_gbl_TTPdecode[2] = {
- "type_static",
- "type_translation"
+const char *acpi_gbl_ttp_decode[] = {
+ "TypeStatic",
+ "TypeTranslation"
};
-const char *acpi_gbl_TYPdecode[4] = {
+const char *acpi_gbl_typ_decode[] = {
"Compatibility",
- "type_a",
- "type_b",
- "type_f"
+ "TypeA",
+ "TypeB",
+ "TypeF"
};
#endif
@@ -240,6 +240,104 @@ static const u8 acpi_gbl_resource_types[] = {
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_walk_aml_resources
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource template
+ * aml_length - Length of the entire template
+ * user_function - Called once for each descriptor found. If
+ * NULL, a pointer to the end_tag is returned
+ * Context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk a raw AML resource list (buffer). User function called
+ * once for each resource found.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+ acpi_size aml_length,
+ acpi_walk_aml_callback user_function, void **context)
+{
+ acpi_status status;
+ u8 *end_aml;
+ u8 resource_index;
+ u32 length;
+ u32 offset = 0;
+
+ ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
+
+ /* The absolute minimum resource template is one end_tag descriptor */
+
+ if (aml_length < sizeof(struct aml_resource_end_tag)) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
+ /* Point to the end of the resource template buffer */
+
+ end_aml = aml + aml_length;
+
+ /* Walk the byte list, abort on any invalid descriptor type or length */
+
+ while (aml < end_aml) {
+
+ /* Validate the Resource Type and Resource Length */
+
+ status = acpi_ut_validate_resource(aml, &resource_index);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the length of this descriptor */
+
+ length = acpi_ut_get_descriptor_length(aml);
+
+ /* Invoke the user function */
+
+ if (user_function) {
+ status =
+ user_function(aml, length, offset, resource_index,
+ context);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /* An end_tag descriptor terminates this resource template */
+
+ if (acpi_ut_get_resource_type(aml) ==
+ ACPI_RESOURCE_NAME_END_TAG) {
+ /*
+ * There must be at least one more byte in the buffer for
+ * the 2nd byte of the end_tag
+ */
+ if ((aml + 1) >= end_aml) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
+ /* Return the pointer to the end_tag if requested */
+
+ if (!user_function) {
+ *context = aml;
+ }
+
+ /* Normal exit */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ aml += length;
+ offset += length;
+ }
+
+ /* Did not find an end_tag descriptor */
+
+ return (AE_AML_NO_RESOURCE_END_TAG);
+}
+
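A hypothetical user_function for the walker above, with the parameter list matching the invocation shown (aml, length, offset, resource_index, context); it simply counts descriptors through the context pointer, and returning any failure status aborts the walk.

static acpi_status count_resources(u8 *aml, u32 length, u32 offset,
				   u8 resource_index, void **context)
{
	u32 *count = *context;		/* caller stored &counter here */

	(*count)++;
	return (AE_OK);
}

A caller would pair it with something along the lines of: u32 counter = 0; void *ctx = &counter; acpi_ut_walk_aml_resources(aml, aml_length, count_resources, &ctx);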
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_validate_resource
*
* PARAMETERS: Aml - Pointer to the raw AML resource descriptor
@@ -273,6 +371,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
* Examine the large/small bit in the resource header
*/
if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
+
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
@@ -376,6 +475,7 @@ u8 acpi_ut_get_resource_type(void *aml)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
/* Large Resource Type -- bits 6:0 contain the name */
return (ACPI_GET8(aml));
@@ -411,6 +511,7 @@ u16 acpi_ut_get_resource_length(void *aml)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
/* Large Resource type -- bytes 1-2 contain the 16-bit length */
ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));
@@ -495,60 +596,21 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
u8 ** end_tag)
{
acpi_status status;
- u8 *aml;
- u8 *end_aml;
-
- ACPI_FUNCTION_TRACE("ut_get_resource_end_tag");
- /* Get start and end pointers */
-
- aml = obj_desc->buffer.pointer;
- end_aml = aml + obj_desc->buffer.length;
+ ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);
/* Allow a buffer length of zero */
if (!obj_desc->buffer.length) {
- *end_tag = aml;
+ *end_tag = obj_desc->buffer.pointer;
return_ACPI_STATUS(AE_OK);
}
- /* Walk the resource template, one descriptor per iteration */
-
- while (aml < end_aml) {
- /* Validate the Resource Type and Resource Length */
-
- status = acpi_ut_validate_resource(aml, NULL);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* end_tag resource indicates the end of the resource template */
-
- if (acpi_ut_get_resource_type(aml) ==
- ACPI_RESOURCE_NAME_END_TAG) {
- /*
- * There must be at least one more byte in the buffer for
- * the 2nd byte of the end_tag
- */
- if ((aml + 1) >= end_aml) {
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
- }
-
- /* Return the pointer to the end_tag */
-
- *end_tag = aml;
- return_ACPI_STATUS(AE_OK);
- }
-
- /*
- * Point to the next resource descriptor in the AML buffer. The
- * descriptor length is guaranteed to be non-zero by resource
- * validation above.
- */
- aml += acpi_ut_get_descriptor_length(aml);
- }
+ /* Validate the template and get a pointer to the end_tag */
- /* Did not find an end_tag resource descriptor */
+ status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
+ obj_desc->buffer.length, NULL,
+ (void **)end_tag);
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ return_ACPI_STATUS(status);
}
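A sketch of the NULL-callback convention this rewrite relies on: when user_function is NULL the walker only validates the template and hands back the end_tag address through the context argument (buffer and length are assumed to describe a raw resource template).

u8 *end_tag;
acpi_status status;

status = acpi_ut_walk_aml_resources(buffer, length,
				    NULL, (void **)&end_tag);
if (ACPI_SUCCESS(status)) {
	/* end_tag now points at the end_tag descriptor */
}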
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
index 4b134a722907..0f5c5bb5deff 100644
--- a/drivers/acpi/utilities/utstate.c
+++ b/drivers/acpi/utilities/utstate.c
@@ -96,7 +96,7 @@ void
acpi_ut_push_generic_state(union acpi_generic_state **list_head,
union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE("ut_push_generic_state");
+ ACPI_FUNCTION_TRACE(ut_push_generic_state);
/* Push the state object onto the front of the list (stack) */
@@ -123,12 +123,13 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE("ut_pop_generic_state");
+ ACPI_FUNCTION_TRACE(ut_pop_generic_state);
/* Remove the state object at the head of the list (stack) */
state = *list_head;
if (state) {
+
/* Update the list head */
*list_head = state->common.next;
@@ -158,9 +159,10 @@ union acpi_generic_state *acpi_ut_create_generic_state(void)
state = acpi_os_acquire_object(acpi_gbl_state_cache);
if (state) {
+
/* Initialize */
memset(state, 0, sizeof(union acpi_generic_state));
- state->common.data_type = ACPI_DESC_TYPE_STATE;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
}
return (state);
@@ -183,7 +185,7 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE("ut_create_thread_state");
+ ACPI_FUNCTION_TRACE(ut_create_thread_state);
/* Create the generic state object */
@@ -194,7 +196,7 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
/* Init fields specific to the update struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_THREAD;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD;
state->thread.thread_id = acpi_os_get_thread_id();
return_PTR((struct acpi_thread_state *)state);
@@ -220,7 +222,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR("ut_create_update_state", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);
/* Create the generic state object */
@@ -231,7 +233,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
/* Init fields specific to the update struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_UPDATE;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
state->update.object = object;
state->update.value = action;
@@ -257,7 +259,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR("ut_create_pkg_state", internal_object);
+ ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);
/* Create the generic state object */
@@ -268,7 +270,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
/* Init fields specific to the update struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_PACKAGE;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE;
state->pkg.source_object = (union acpi_operand_object *)internal_object;
state->pkg.dest_object = external_object;
state->pkg.index = index;
@@ -294,7 +296,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE("ut_create_control_state");
+ ACPI_FUNCTION_TRACE(ut_create_control_state);
/* Create the generic state object */
@@ -305,7 +307,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
/* Init fields specific to the control struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_CONTROL;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
return_PTR(state);
@@ -319,15 +321,19 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
*
* RETURN: None
*
- * DESCRIPTION: Put a state object back into the global state cache. The object
- * is not actually freed at this time.
+ * DESCRIPTION: Release a state object to the state cache. NULL state objects
+ * are ignored.
*
******************************************************************************/
void acpi_ut_delete_generic_state(union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE("ut_delete_generic_state");
+ ACPI_FUNCTION_TRACE(ut_delete_generic_state);
+
+ /* Ignore null state */
- (void)acpi_os_release_object(acpi_gbl_state_cache, state);
+ if (state) {
+ (void)acpi_os_release_object(acpi_gbl_state_cache, state);
+ }
return_VOID;
}
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index 308a960871be..3538f69c82a1 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>
@@ -67,7 +65,7 @@ acpi_status acpi_initialize_subsystem(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_initialize_subsystem");
+ ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
@@ -109,6 +107,8 @@ acpi_status acpi_initialize_subsystem(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
/*******************************************************************************
*
* FUNCTION: acpi_enable_subsystem
@@ -121,12 +121,11 @@ acpi_status acpi_initialize_subsystem(void)
* Puts system into ACPI mode if it isn't already.
*
******************************************************************************/
-
acpi_status acpi_enable_subsystem(u32 flags)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_enable_subsystem");
+ ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
/*
* We must initialize the hardware before we can enable ACPI.
@@ -152,7 +151,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
status = acpi_enable();
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "acpi_enable failed"));
+ ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
return_ACPI_STATUS(status);
}
}
@@ -229,6 +228,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
/*******************************************************************************
*
* FUNCTION: acpi_initialize_objects
@@ -241,12 +242,11 @@ acpi_status acpi_enable_subsystem(u32 flags)
* objects and executing AML code for Regions, buffers, etc.
*
******************************************************************************/
-
acpi_status acpi_initialize_objects(u32 flags)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_initialize_objects");
+ ACPI_FUNCTION_TRACE(acpi_initialize_objects);
/*
* Run all _REG methods
@@ -257,7 +257,7 @@ acpi_status acpi_initialize_objects(u32 flags)
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Executing _REG op_region methods\n"));
+ "[Init] Executing _REG OpRegion methods\n"));
status = acpi_ev_initialize_op_regions();
if (ACPI_FAILURE(status)) {
@@ -305,6 +305,8 @@ acpi_status acpi_initialize_objects(u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
+
/*******************************************************************************
*
* FUNCTION: acpi_terminate
@@ -316,12 +318,11 @@ acpi_status acpi_initialize_objects(u32 flags)
* DESCRIPTION: Shutdown the ACPI subsystem. Release all resources.
*
******************************************************************************/
-
acpi_status acpi_terminate(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_terminate");
+ ACPI_FUNCTION_TRACE(acpi_terminate);
/* Terminate the AML Debugger if present */
@@ -348,6 +349,8 @@ acpi_status acpi_terminate(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_terminate)
+
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -362,7 +365,6 @@ acpi_status acpi_terminate(void)
* initialized successfully.
*
******************************************************************************/
-
acpi_status acpi_subsystem_status(void)
{
@@ -373,6 +375,8 @@ acpi_status acpi_subsystem_status(void)
}
}
+ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_system_info
@@ -390,14 +394,13 @@ acpi_status acpi_subsystem_status(void)
* and the value of out_buffer is undefined.
*
******************************************************************************/
-
acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
{
struct acpi_system_info *info_ptr;
acpi_status status;
u32 i;
- ACPI_FUNCTION_TRACE("acpi_get_system_info");
+ ACPI_FUNCTION_TRACE(acpi_get_system_info);
/* Parameter validation */
@@ -448,15 +451,15 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
/* Current status of the ACPI tables, per table type */
- info_ptr->num_table_types = NUM_ACPI_TABLE_TYPES;
- for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) {
+ info_ptr->num_table_types = ACPI_TABLE_ID_MAX + 1;
+ for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
info_ptr->table_info[i].count = acpi_gbl_table_lists[i].count;
}
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_get_system_info);
+ACPI_EXPORT_SYMBOL(acpi_get_system_info)
/*****************************************************************************
*
@@ -472,7 +475,6 @@ EXPORT_SYMBOL(acpi_get_system_info);
* TBD: When a second function is added, must save the Function also.
*
****************************************************************************/
-
acpi_status
acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
{
@@ -489,6 +491,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
return AE_OK;
}
+ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
/*****************************************************************************
@@ -502,10 +505,9 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
* DESCRIPTION: Empty all caches (delete the cached objects)
*
****************************************************************************/
-
acpi_status acpi_purge_cached_objects(void)
{
- ACPI_FUNCTION_TRACE("acpi_purge_cached_objects");
+ ACPI_FUNCTION_TRACE(acpi_purge_cached_objects);
(void)acpi_os_purge_cache(acpi_gbl_state_cache);
(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
@@ -513,3 +515,5 @@ acpi_status acpi_purge_cached_objects(void)
(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
return_ACPI_STATUS(AE_OK);
}
+
+ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6458c47f7ac2..6b516852ac12 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -273,11 +273,13 @@ acpi_evaluate_integer(acpi_handle handle,
status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
if (ACPI_FAILURE(status)) {
acpi_util_eval_error(handle, pathname, status);
+ kfree(element);
return_ACPI_STATUS(status);
}
if (element->type != ACPI_TYPE_INTEGER) {
acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+ kfree(element);
return_ACPI_STATUS(AE_BAD_DATA);
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index bd4887518373..86531ab4ee55 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -323,7 +323,7 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
if (!ACPI_SUCCESS(status))
return_VALUE(status);
obj = (union acpi_object *)buffer.pointer;
- if (!obj && (obj->type != ACPI_TYPE_PACKAGE)) {
+ if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _BCL data\n"));
status = -EFAULT;
goto err;
@@ -1294,7 +1294,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long device_id;
- int status, result;
+ int status;
struct acpi_video_device *data;
ACPI_FUNCTION_TRACE("acpi_video_bus_get_one_device");
@@ -1346,8 +1346,11 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Error installing notify handler\n"));
- result = -ENODEV;
- goto end;
+ if (data->brightness)
+ kfree(data->brightness->levels);
+ kfree(data->brightness);
+ kfree(data);
+ return -ENODEV;
}
down(&video->sem);
@@ -1359,7 +1362,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
return_VALUE(0);
}
- end:
return_VALUE(-ENOENT);
}
@@ -1643,8 +1645,9 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
printk(KERN_WARNING PREFIX
"hhuuhhuu bug in acpi video driver.\n");
+ if (data->brightness)
+ kfree(data->brightness->levels);
kfree(data->brightness);
-
kfree(data);
}
@@ -1785,6 +1788,10 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Error installing notify handler\n"));
+ acpi_video_bus_stop_devices(video);
+ acpi_video_bus_put_devices(video);
+ kfree(video->attached_array);
+ acpi_video_bus_remove_fs(device);
result = -ENODEV;
goto end;
}
@@ -1796,10 +1803,8 @@ static int acpi_video_bus_add(struct acpi_device *device)
video->flags.post ? "yes" : "no");
end:
- if (result) {
- acpi_video_bus_remove_fs(device);
+ if (result)
kfree(video);
- }
return_VALUE(result);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d0f84ff78776..27c2176895de 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -356,6 +356,13 @@ int device_add(struct device *dev)
if (parent)
klist_add_tail(&dev->knode_parent, &parent->klist_children);
+ if (dev->class) {
+ /* tie the class to the device */
+ down(&dev->class->sem);
+ list_add_tail(&dev->node, &dev->class->devices);
+ up(&dev->class->sem);
+ }
+
/* notify platform of device entry */
if (platform_notify)
platform_notify(dev);
@@ -455,6 +462,9 @@ void device_del(struct device * dev)
sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(&dev->parent->kobj, class_name);
kfree(class_name);
+ down(&dev->class->sem);
+ list_del_init(&dev->node);
+ up(&dev->class->sem);
}
device_remove_file(dev, &dev->uevent_attr);
@@ -601,11 +611,6 @@ struct device *device_create(struct class *class, struct device *parent,
if (retval)
goto error;
- /* tie the class to the device */
- down(&class->sem);
- list_add_tail(&dev->node, &class->devices);
- up(&class->sem);
-
return dev;
error:
@@ -636,9 +641,7 @@ void device_destroy(struct class *class, dev_t devt)
}
up(&class->sem);
- if (dev) {
- list_del_init(&dev->node);
+ if (dev)
device_unregister(dev);
- }
}
EXPORT_SYMBOL_GPL(device_destroy);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ae0949b3394f..93d94749310b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -402,6 +402,7 @@ config BLK_DEV_RAM_SIZE
config BLK_DEV_INITRD
bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
+ depends on BROKEN || !FRV
help
The initial RAM filesystem is a ramfs which is loaded by the
boot loader (loadlin or lilo) and that is mounted as root
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9c3b94e8f03b..3c74ea729fc7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -818,7 +818,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->transfer = NULL;
+ lo->transfer = transfer_none;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index f63e07bd9f9c..b0df4f5ab97a 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -747,7 +747,7 @@ static int viodasd_remove(struct vio_dev *vdev)
* support.
*/
static struct vio_device_id viodasd_device_table[] __devinitdata = {
- { "viodasd", "" },
+ { "block", "IBM,iSeries-viodasd" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viodasd_device_table);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index c0f817ba7adb..af6b3bfd169b 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -731,7 +731,7 @@ static int viocd_remove(struct vio_dev *vdev)
* support.
*/
static struct vio_device_id viocd_device_table[] __devinitdata = {
- { "viocd", "" },
+ { "block", "IBM,iSeries-viocd" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viocd_device_table);
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 8c4c6ef748ec..907fb66ec4a9 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -497,7 +497,7 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
info = buffer.pointer;
info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
- ACPI_MEM_FREE(info);
+ kfree(info);
if (match) {
status = hp_acpi_csr_space(handle, &sba_hpa, &length);
if (ACPI_SUCCESS(status))
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ef140ebde117..07473cd84121 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -925,11 +925,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
status = acpi_resource_to_address64(res, &addr);
if (ACPI_SUCCESS(status)) {
- unsigned long size;
-
- size = addr.maximum - addr.minimum + 1;
hdp->hd_phys_address = addr.minimum;
- hdp->hd_address = ioremap(addr.minimum, size);
+ hdp->hd_address = ioremap(addr.minimum, addr.address_length);
if (hpet_is_known(hdp)) {
printk(KERN_DEBUG "%s: 0x%lx is busy\n",
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 2b6a56b2bf35..a5c6a9d7ff08 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -553,7 +553,6 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
#define HVC_POLL_READ 0x00000001
#define HVC_POLL_WRITE 0x00000002
-#define HVC_POLL_QUICK 0x00000004
static int hvc_poll(struct hvc_struct *hp)
{
@@ -568,6 +567,7 @@ static int hvc_poll(struct hvc_struct *hp)
/* Push pending writes */
if (hp->n_outbuf > 0)
hvc_push(hp);
+
/* Reschedule us if still some write pending */
if (hp->n_outbuf > 0)
poll_mask |= HVC_POLL_WRITE;
@@ -680,7 +680,7 @@ int khvcd(void *unused)
poll_mask |= HVC_POLL_READ;
if (hvc_kicked)
continue;
- if (poll_mask & HVC_POLL_QUICK) {
+ if (poll_mask & HVC_POLL_WRITE) {
yield();
continue;
}
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index 83364ea63cba..57106e02fd2e 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -41,37 +41,28 @@
#define hvc_rtas_cookie 0x67781e15
struct hvc_struct *hvc_rtas_dev;
-#define RTASCONS_PUT_ATTEMPTS 16
-
static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;
-static int rtascons_put_delay = 100;
-module_param_named(put_delay, rtascons_put_delay, int, 0644);
-static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf, int count)
+static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
+ int count)
{
- int done;
+ int i;
- /* if there is more than one character to be displayed, wait a bit */
- for (done = 0; done < count; done++) {
- int result;
- result = rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[done]);
- if (result)
+ for (i = 0; i < count; i++) {
+ if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i]))
break;
}
- /* the calling routine expects to receive the number of bytes sent */
- return done;
+
+ return i;
}
static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
{
- int i;
+ int i, c;
for (i = 0; i < count; i++) {
- int c, err;
-
- err = rtas_call(rtascons_get_char_token, 0, 2, &c);
- if (err)
+ if (rtas_call(rtascons_get_char_token, 0, 2, &c))
break;
buf[i] = c;
@@ -106,7 +97,9 @@ static int hvc_rtas_init(void)
hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops);
if (IS_ERR(hp))
return PTR_ERR(hp);
+
hvc_rtas_dev = hp;
+
return 0;
}
module_init(hvc_rtas_init);
@@ -114,8 +107,8 @@ module_init(hvc_rtas_init);
/* This will tear down the tty portion of the driver */
static void __exit hvc_rtas_exit(void)
{
- /* Really the fun isn't over until the worker thread breaks down and the
- * tty cleans up */
+ /* Really the fun isn't over until the worker thread breaks down and
+ * the tty cleans up */
if (hvc_rtas_dev)
hvc_remove(hvc_rtas_dev);
}
@@ -127,12 +120,14 @@ static int hvc_rtas_console_init(void)
rtascons_put_char_token = rtas_token("put-term-char");
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
+
rtascons_get_char_token = rtas_token("get-term-char");
if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
- hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops );
+ hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops);
add_preferred_console("hvc", 0, NULL);
+
return 0;
}
console_initcall(hvc_rtas_console_init);
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index a9522189fc9e..a0370ed752ce 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -1179,7 +1179,7 @@ static int __init hvsi_init(void)
if (tty_register_driver(hvsi_driver))
panic("Couldn't register hvsi console driver\n");
- printk(KERN_INFO "HVSI: registered %i devices\n", hvsi_count);
+ printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
return 0;
}
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 2d11ddd99e55..8f8867170973 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -212,24 +212,16 @@ static int set_param_str(const char *val, struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv = 0;
- const char *end;
- char valcp[16];
- int len;
-
- /* Truncate leading and trailing spaces. */
- while (isspace(*val))
- val++;
- end = val + strlen(val) - 1;
- while ((end >= val) && isspace(*end))
- end--;
- len = end - val + 1;
- if (len > sizeof(valcp) - 1)
- return -EINVAL;
- memcpy(valcp, val, len);
- valcp[len] = '\0';
+ char *dup, *s;
+
+ dup = kstrdup(val, GFP_KERNEL);
+ if (!dup)
+ return -ENOMEM;
+
+ s = strstrip(dup);
down_read(&register_sem);
- rv = fn(valcp, NULL);
+ rv = fn(s, NULL);
if (rv)
goto out_unlock;
@@ -239,6 +231,7 @@ static int set_param_str(const char *val, struct kernel_param *kp)
out_unlock:
up_read(&register_sem);
+ kfree(dup);
return rv;
}
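The general shape of the idiom adopted here, as a hedged sketch (consume_string() is a hypothetical consumer): duplicate the const input with kstrdup(), trim it in place with strstrip(), and free the duplicate on every exit path.

static int handle_param(const char *val)
{
	char *dup, *s;
	int rv;

	dup = kstrdup(val, GFP_KERNEL);
	if (!dup)
		return -ENOMEM;

	s = strstrip(dup);		/* trims leading/trailing whitespace */
	rv = consume_string(s);		/* hypothetical consumer */

	kfree(dup);
	return rv;
}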
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 07213454c458..0c141c295fb6 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -844,7 +844,7 @@ static int bh_action(MGSLPC_INFO *info)
return rc;
}
-void bh_handler(void* Context)
+static void bh_handler(void* Context)
{
MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
int action;
@@ -888,7 +888,7 @@ void bh_handler(void* Context)
__FILE__,__LINE__,info->device_name);
}
-void bh_transmit(MGSLPC_INFO *info)
+static void bh_transmit(MGSLPC_INFO *info)
{
struct tty_struct *tty = info->tty;
if (debug_level >= DEBUG_LEVEL_BH)
@@ -900,7 +900,7 @@ void bh_transmit(MGSLPC_INFO *info)
}
}
-void bh_status(MGSLPC_INFO *info)
+static void bh_status(MGSLPC_INFO *info)
{
info->ri_chkcount = 0;
info->dsr_chkcount = 0;
@@ -2305,7 +2305,7 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
return ioctl_common(info, cmd, arg);
}
-int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg)
+static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg)
{
int error;
struct mgsl_icount cnow; /* kernel counter temps */
@@ -2877,7 +2877,7 @@ done:
return ((count < begin+len-off) ? count : begin+len-off);
}
-int rx_alloc_buffers(MGSLPC_INFO *info)
+static int rx_alloc_buffers(MGSLPC_INFO *info)
{
/* each buffer has header and data */
info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size;
@@ -2900,13 +2900,13 @@ int rx_alloc_buffers(MGSLPC_INFO *info)
return 0;
}
-void rx_free_buffers(MGSLPC_INFO *info)
+static void rx_free_buffers(MGSLPC_INFO *info)
{
kfree(info->rx_buf);
info->rx_buf = NULL;
}
-int claim_resources(MGSLPC_INFO *info)
+static int claim_resources(MGSLPC_INFO *info)
{
if (rx_alloc_buffers(info) < 0 ) {
printk( "Cant allocate rx buffer %s\n", info->device_name);
@@ -2916,7 +2916,7 @@ int claim_resources(MGSLPC_INFO *info)
return 0;
}
-void release_resources(MGSLPC_INFO *info)
+static void release_resources(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_INFO)
printk("release_resources(%s)\n", info->device_name);
@@ -2928,7 +2928,7 @@ void release_resources(MGSLPC_INFO *info)
*
* Arguments: info pointer to device instance data
*/
-void mgslpc_add_device(MGSLPC_INFO *info)
+static void mgslpc_add_device(MGSLPC_INFO *info)
{
info->next_device = NULL;
info->line = mgslpc_device_count;
@@ -2964,7 +2964,7 @@ void mgslpc_add_device(MGSLPC_INFO *info)
#endif
}
-void mgslpc_remove_device(MGSLPC_INFO *remove_info)
+static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
{
MGSLPC_INFO *info = mgslpc_device_list;
MGSLPC_INFO *last = NULL;
@@ -3257,7 +3257,7 @@ static void loopback_enable(MGSLPC_INFO *info)
write_reg(info, CHA + MODE, val);
}
-void hdlc_mode(MGSLPC_INFO *info)
+static void hdlc_mode(MGSLPC_INFO *info)
{
unsigned char val;
unsigned char clkmode, clksubmode;
@@ -3497,7 +3497,7 @@ void hdlc_mode(MGSLPC_INFO *info)
rx_stop(info);
}
-void rx_stop(MGSLPC_INFO *info)
+static void rx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_stop(%s)\n",
@@ -3510,7 +3510,7 @@ void rx_stop(MGSLPC_INFO *info)
info->rx_overflow = 0;
}
-void rx_start(MGSLPC_INFO *info)
+static void rx_start(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_start(%s)\n",
@@ -3526,7 +3526,7 @@ void rx_start(MGSLPC_INFO *info)
info->rx_enabled = 1;
}
-void tx_start(MGSLPC_INFO *info)
+static void tx_start(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_start(%s)\n",
@@ -3564,7 +3564,7 @@ void tx_start(MGSLPC_INFO *info)
info->tx_enabled = 1;
}
-void tx_stop(MGSLPC_INFO *info)
+static void tx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_stop(%s)\n",
@@ -3578,7 +3578,7 @@ void tx_stop(MGSLPC_INFO *info)
/* Reset the adapter to a known state and prepare it for further use.
*/
-void reset_device(MGSLPC_INFO *info)
+static void reset_device(MGSLPC_INFO *info)
{
/* power up both channels (set BIT7) */
write_reg(info, CHA + CCR0, 0x80);
@@ -3628,7 +3628,7 @@ void reset_device(MGSLPC_INFO *info)
write_reg(info, IPC, 0x05);
}
-void async_mode(MGSLPC_INFO *info)
+static void async_mode(MGSLPC_INFO *info)
{
unsigned char val;
@@ -3799,7 +3799,7 @@ void async_mode(MGSLPC_INFO *info)
/* Set the HDLC idle mode for the transmitter.
*/
-void tx_set_idle(MGSLPC_INFO *info)
+static void tx_set_idle(MGSLPC_INFO *info)
{
/* Note: ESCC2 only supports flags and one idle modes */
if (info->idle_mode == HDLC_TXIDLE_FLAGS)
@@ -3810,7 +3810,7 @@ void tx_set_idle(MGSLPC_INFO *info)
/* get state of the V24 status (input) signals.
*/
-void get_signals(MGSLPC_INFO *info)
+static void get_signals(MGSLPC_INFO *info)
{
unsigned char status = 0;
@@ -3832,7 +3832,7 @@ void get_signals(MGSLPC_INFO *info)
/* Set the state of DTR and RTS based on contents of
* serial_signals member of device extension.
*/
-void set_signals(MGSLPC_INFO *info)
+static void set_signals(MGSLPC_INFO *info)
{
unsigned char val;
@@ -3856,7 +3856,7 @@ void set_signals(MGSLPC_INFO *info)
set_reg_bits(info, CHA + PVR, PVR_DTR);
}
-void rx_reset_buffers(MGSLPC_INFO *info)
+static void rx_reset_buffers(MGSLPC_INFO *info)
{
RXBUF *buf;
int i;
@@ -3875,7 +3875,7 @@ void rx_reset_buffers(MGSLPC_INFO *info)
*
* Returns 1 if frame returned, otherwise 0
*/
-int rx_get_frame(MGSLPC_INFO *info)
+static int rx_get_frame(MGSLPC_INFO *info)
{
unsigned short status;
RXBUF *buf;
@@ -3961,7 +3961,7 @@ int rx_get_frame(MGSLPC_INFO *info)
return 1;
}
-BOOLEAN register_test(MGSLPC_INFO *info)
+static BOOLEAN register_test(MGSLPC_INFO *info)
{
static unsigned char patterns[] =
{ 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
@@ -3987,7 +3987,7 @@ BOOLEAN register_test(MGSLPC_INFO *info)
return rc;
}
-BOOLEAN irq_test(MGSLPC_INFO *info)
+static BOOLEAN irq_test(MGSLPC_INFO *info)
{
unsigned long end_time;
unsigned long flags;
@@ -4022,7 +4022,7 @@ BOOLEAN irq_test(MGSLPC_INFO *info)
return info->irq_occurred ? TRUE : FALSE;
}
-int adapter_test(MGSLPC_INFO *info)
+static int adapter_test(MGSLPC_INFO *info)
{
if (!register_test(info)) {
info->init_error = DiagStatus_AddressFailure;
@@ -4044,7 +4044,7 @@ int adapter_test(MGSLPC_INFO *info)
return 0;
}
-void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
+static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
{
int i;
int linecount;
@@ -4079,7 +4079,7 @@ void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
/* HDLC frame time out
* update stats and do tx completion processing
*/
-void tx_timeout(unsigned long context)
+static void tx_timeout(unsigned long context)
{
MGSLPC_INFO *info = (MGSLPC_INFO*)context;
unsigned long flags;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index a90f5d97df35..43dfd8689dce 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -512,7 +512,7 @@ static struct sonypi_device {
#ifdef CONFIG_ACPI
static struct acpi_device *sonypi_acpi_device;
-static int acpi_enabled;
+static int acpi_driver_registered;
#endif
static int sonypi_ec_write(u8 addr, u8 value)
@@ -869,7 +869,7 @@ found:
sonypi_report_input_event(event);
#ifdef CONFIG_ACPI
- if (acpi_enabled)
+ if (sonypi_acpi_device)
acpi_bus_generate_event(sonypi_acpi_device, 1, event);
#endif
@@ -1551,8 +1551,8 @@ static int __init sonypi_init(void)
goto err_free_device;
#ifdef CONFIG_ACPI
- if (acpi_bus_register_driver(&sonypi_acpi_driver) > 0)
- acpi_enabled = 1;
+ if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0)
+ acpi_driver_registered = 1;
#endif
return 0;
@@ -1567,7 +1567,7 @@ static int __init sonypi_init(void)
static void __exit sonypi_exit(void)
{
#ifdef CONFIG_ACPI
- if (acpi_enabled)
+ if (acpi_driver_registered)
acpi_bus_unregister_driver(&sonypi_acpi_driver);
#endif
platform_device_unregister(sonypi_platform_device);
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 60aabdb4a046..11c7e9de5958 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -989,7 +989,7 @@ static int viotape_remove(struct vio_dev *vdev)
* support.
*/
static struct vio_device_id viotape_device_table[] __devinitdata = {
- { "viotape", "" },
+ { "byte", "IBM,iSeries-viotape" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viotape_device_table);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 4b4d7db1ff7b..498aa37bca22 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
+#include <linux/connector.h>
#include <asm/atomic.h>
#include <linux/cn_proc.h>
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 79d581c86520..b49bacfd8de8 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -121,6 +121,7 @@ nlmsg_failure:
kfree_skb(skb);
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(cn_netlink_send);
/*
* Callback helper - queues work and setup destructor for given data.
@@ -319,6 +320,7 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
return 0;
}
+EXPORT_SYMBOL_GPL(cn_add_callback);
/*
* Callback remove routing - removes callback
@@ -335,6 +337,7 @@ void cn_del_callback(struct cb_id *id)
cn_queue_del_callback(dev->cbdev, id);
cn_notify(id, 1);
}
+EXPORT_SYMBOL_GPL(cn_del_callback);
/*
* Checks two connector's control messages to be the same.
@@ -488,7 +491,3 @@ static void __devexit cn_fini(void)
subsys_initcall(cn_init);
module_exit(cn_fini);
-
-EXPORT_SYMBOL_GPL(cn_add_callback);
-EXPORT_SYMBOL_GPL(cn_del_callback);
-EXPORT_SYMBOL_GPL(cn_netlink_send);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e07a35487bde..b3ebc8f01975 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
+#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
@@ -72,6 +73,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
static DEFINE_MUTEX (dbs_mutex);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
@@ -414,12 +423,14 @@ static void dbs_check_cpu(int cpu)
static void do_dbs_timer(void *data)
{
int i;
+ lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
for_each_online_cpu(i)
dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
mutex_unlock(&dbs_mutex);
+ unlock_cpu_hotplug();
}
static inline void dbs_timer_init(void)
@@ -514,6 +525,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
case CPUFREQ_GOV_LIMITS:
+ lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
@@ -524,6 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
+ unlock_cpu_hotplug();
break;
}
return 0;
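
The DEADLOCK ALERT comment added above pins the lock order: cpu_hotplug first, dbs_mutex second. A small illustrative sketch of that nesting, assuming it sits next to dbs_mutex inside the governor (so it is not a drop-in function):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>

static void dbs_limits_sketch(struct cpufreq_policy *policy)
{
	lock_cpu_hotplug();		/* outer lock; recursive for this task, so
					 * __cpufreq_driver_target() may re-take it */
	mutex_lock(&dbs_mutex);		/* inner lock */

	__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	mutex_unlock(&dbs_mutex);
	unlock_cpu_hotplug();		/* release in reverse order */
}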
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3e6ffcaa5af4..693e540481b4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
+#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
@@ -71,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
static DEFINE_MUTEX (dbs_mutex);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
@@ -363,12 +372,14 @@ static void dbs_check_cpu(int cpu)
static void do_dbs_timer(void *data)
{
int i;
+ lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
for_each_online_cpu(i)
dbs_check_cpu(i);
queue_delayed_work(dbs_workq, &dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
mutex_unlock(&dbs_mutex);
+ unlock_cpu_hotplug();
}
static inline void dbs_timer_init(void)
@@ -469,6 +480,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
case CPUFREQ_GOV_LIMITS:
+ lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
@@ -479,6 +491,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
+ unlock_cpu_hotplug();
break;
}
return 0;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f7af7e9bb7d9..884320e70403 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -255,12 +255,12 @@ config I2C_POWERMAC
will be called i2c-powermac.
config I2C_MPC
- tristate "MPC107/824x/85xx/52xx"
+ tristate "MPC107/824x/85xx/52xx/86xx"
depends on I2C && PPC32
help
If you say yes to this option, support will be included for the
built-in I2C interface on the MPC107/Tsi107/MPC8240/MPC8245 and
- MPC85xx family processors. The driver may also work on 52xx
+ MPC85xx/MPC8641 family processors. The driver may also work on 52xx
family processors, though interrupts are known not to work.
This driver can also be built as a module. If so, the module
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index b4a41d6d0714..6de3cd3d6e8e 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1451,9 +1451,12 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
} else {
confused:
printk (KERN_ERR "%s: cdrom_pc_intr: The drive "
- "appears confused (ireason = 0x%02x)\n",
+ "appears confused (ireason = 0x%02x). "
+ "Trying to recover by ending request.\n",
drive->name, ireason);
rq->flags |= REQ_FAILED;
+ cdrom_end_request(drive, 0);
+ return ide_stopped;
}
/* Now we wait for another interrupt. */
@@ -1722,8 +1725,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
}
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, NULL);
return ide_started;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index c481be8b807f..783a2475ee8b 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -206,8 +206,7 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
ide_hwif_t *hwif = HWIF(drive);
struct scatterlist *sg = hwif->sg_table;
- if ((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256)
- BUG();
+ BUG_ON((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256);
ide_map_sg(drive, rq);
@@ -947,8 +946,7 @@ void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_p
}
printk("\n");
- if (!(hwif->dma_master))
- BUG();
+ BUG_ON(!hwif->dma_master);
}
EXPORT_SYMBOL_GPL(ide_setup_dma);
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index a53e3ce4a142..a1179e924962 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -898,8 +898,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
"to send us more data than expected "
"- discarding data\n");
idefloppy_discard_data(drive,bcount.all);
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive,
&idefloppy_pc_intr,
IDEFLOPPY_WAIT_CMD,
@@ -932,8 +931,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
pc->actually_transferred += bcount.all;
pc->current_position += bcount.all;
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL); /* And set the interrupt handler again */
return ide_started;
}
@@ -960,8 +958,7 @@ static ide_startstop_t idefloppy_transfer_pc (ide_drive_t *drive)
"issuing a packet command\n");
return ide_do_reset(drive);
}
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
/* Set the interrupt routine */
ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);
/* Send the actual packet */
@@ -1017,8 +1014,7 @@ static ide_startstop_t idefloppy_transfer_pc1 (ide_drive_t *drive)
* 40 and 50msec work well. idefloppy_pc_intr will not be actually
* used until after the packet is moved in about 50 msec.
*/
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive,
&idefloppy_pc_intr, /* service routine for packet command */
floppy->ticks, /* wait this long before "failing" */
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index c01615dec202..4f2f138de2ca 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -142,38 +142,41 @@ enum {
static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
+ struct request_pm_state *pm = rq->end_io_data;
+
if (drive->media != ide_disk)
return;
- switch (rq->pm->pm_step) {
+ switch (pm->pm_step) {
case ide_pm_flush_cache: /* Suspend step 1 (flush cache) complete */
- if (rq->pm->pm_state == PM_EVENT_FREEZE)
- rq->pm->pm_step = ide_pm_state_completed;
+ if (pm->pm_state == PM_EVENT_FREEZE)
+ pm->pm_step = ide_pm_state_completed;
else
- rq->pm->pm_step = idedisk_pm_standby;
+ pm->pm_step = idedisk_pm_standby;
break;
case idedisk_pm_standby: /* Suspend step 2 (standby) complete */
- rq->pm->pm_step = ide_pm_state_completed;
+ pm->pm_step = ide_pm_state_completed;
break;
case idedisk_pm_idle: /* Resume step 1 (idle) complete */
- rq->pm->pm_step = ide_pm_restore_dma;
+ pm->pm_step = ide_pm_restore_dma;
break;
}
}
static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
+ struct request_pm_state *pm = rq->end_io_data;
ide_task_t *args = rq->special;
memset(args, 0, sizeof(*args));
if (drive->media != ide_disk) {
/* skip idedisk_pm_idle for ATAPI devices */
- if (rq->pm->pm_step == idedisk_pm_idle)
- rq->pm->pm_step = ide_pm_restore_dma;
+ if (pm->pm_step == idedisk_pm_idle)
+ pm->pm_step = ide_pm_restore_dma;
}
- switch (rq->pm->pm_step) {
+ switch (pm->pm_step) {
case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */
if (drive->media != ide_disk)
break;
@@ -215,7 +218,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
drive->hwif->ide_dma_check(drive);
break;
}
- rq->pm->pm_step = ide_pm_state_completed;
+ pm->pm_step = ide_pm_state_completed;
return ide_stopped;
}
@@ -362,12 +365,13 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
}
}
} else if (blk_pm_request(rq)) {
+ struct request_pm_state *pm = rq->end_io_data;
#ifdef DEBUG_PM
printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
drive->name, rq->pm->pm_step, stat, err);
#endif
ide_complete_power_step(drive, rq, stat, err);
- if (rq->pm->pm_step == ide_pm_state_completed)
+ if (pm->pm_step == ide_pm_state_completed)
ide_complete_pm_request(drive, rq);
return;
}
@@ -871,6 +875,39 @@ done:
return ide_stopped;
}
+static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
+{
+ struct request_pm_state *pm = rq->end_io_data;
+
+ if (blk_pm_suspend_request(rq) &&
+ pm->pm_step == ide_pm_state_start_suspend)
+ /* Mark drive blocked when starting the suspend sequence. */
+ drive->blocked = 1;
+ else if (blk_pm_resume_request(rq) &&
+ pm->pm_step == ide_pm_state_start_resume) {
+ /*
+ * The first thing we do on wakeup is to wait for BSY bit to
+ * go away (with a looong timeout) as a drive on this hwif may
+ * just be POSTing itself.
+ * We do that before even selecting as the "other" device on
+ * the bus may be broken enough to walk on our toes at this
+ * point.
+ */
+ int rc;
+#ifdef DEBUG_PM
+ printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
+#endif
+ rc = ide_wait_not_busy(HWIF(drive), 35000);
+ if (rc)
+ printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
+ SELECT_DRIVE(drive);
+ HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
+ rc = ide_wait_not_busy(HWIF(drive), 10000);
+ if (rc)
+ printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
+ }
+}
+
/**
* start_request - start of I/O and command issuing for IDE
*
@@ -909,33 +946,8 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (block == 0 && drive->remap_0_to_1 == 1)
block = 1; /* redirect MBR access to EZ-Drive partn table */
- if (blk_pm_suspend_request(rq) &&
- rq->pm->pm_step == ide_pm_state_start_suspend)
- /* Mark drive blocked when starting the suspend sequence. */
- drive->blocked = 1;
- else if (blk_pm_resume_request(rq) &&
- rq->pm->pm_step == ide_pm_state_start_resume) {
- /*
- * The first thing we do on wakeup is to wait for BSY bit to
- * go away (with a looong timeout) as a drive on this hwif may
- * just be POSTing itself.
- * We do that before even selecting as the "other" device on
- * the bus may be broken enough to walk on our toes at this
- * point.
- */
- int rc;
-#ifdef DEBUG_PM
- printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
-#endif
- rc = ide_wait_not_busy(HWIF(drive), 35000);
- if (rc)
- printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
- SELECT_DRIVE(drive);
- HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
- rc = ide_wait_not_busy(HWIF(drive), 10000);
- if (rc)
- printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
- }
+ if (blk_pm_request(rq))
+ ide_check_pm_state(drive, rq);
SELECT_DRIVE(drive);
if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
@@ -950,13 +962,14 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
else if (rq->flags & REQ_DRIVE_TASKFILE)
return execute_drive_cmd(drive, rq);
else if (blk_pm_request(rq)) {
+ struct request_pm_state *pm = rq->end_io_data;
#ifdef DEBUG_PM
printk("%s: start_power_step(step: %d)\n",
drive->name, rq->pm->pm_step);
#endif
startstop = ide_start_power_step(drive, rq);
if (startstop == ide_stopped &&
- rq->pm->pm_step == ide_pm_state_completed)
+ pm->pm_step == ide_pm_state_completed)
ide_complete_pm_request(drive, rq);
return startstop;
}
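
The pm_step handling rewritten above reads more easily as a plain transition function. The model below mirrors only what the two step functions in the hunks show (the step identifiers come from the kernel's own enum, which lies outside this excerpt), so it is a sketch of the sequencing, not kernel code:

#include <stdio.h>

enum pm_step {			/* stand-ins for the kernel's ide_pm_* values */
	ide_pm_flush_cache,	/* suspend step 1: flush the write cache */
	idedisk_pm_standby,	/* suspend step 2: spin down */
	idedisk_pm_idle,	/* resume step 1 */
	ide_pm_restore_dma,	/* resume step 2 */
	ide_pm_state_completed,
};

/* Transitions implied by ide_complete_power_step()/ide_start_power_step():
 * a freeze skips the spin-down, and anything past restore-dma completes. */
static enum pm_step next_step(enum pm_step cur, int freezing)
{
	switch (cur) {
	case ide_pm_flush_cache:
		return freezing ? ide_pm_state_completed : idedisk_pm_standby;
	case idedisk_pm_standby:
		return ide_pm_state_completed;
	case idedisk_pm_idle:
		return ide_pm_restore_dma;
	default:
		return ide_pm_state_completed;
	}
}

int main(void)
{
	enum pm_step s = ide_pm_flush_cache;

	while (s != ide_pm_state_completed) {	/* walk the suspend path */
		printf("step %d\n", s);
		s = next_step(s, 0);
	}
	return 0;
}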
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index b72dde70840a..97a49e77a8f1 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -939,8 +939,7 @@ void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *ha
spin_lock_irqsave(&ide_lock, flags);
- if(hwgroup->handler)
- BUG();
+ BUG_ON(hwgroup->handler);
hwgroup->handler = handler;
hwgroup->expiry = expiry;
hwgroup->timer.expires = jiffies + timeout;
@@ -981,8 +980,7 @@ static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
printk("%s: ATAPI reset complete\n", drive->name);
} else {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -1021,8 +1019,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -1138,8 +1135,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
hwgroup = HWGROUP(drive);
/* We must not reset with running handlers */
- if(hwgroup->handler != NULL)
- BUG();
+ BUG_ON(hwgroup->handler != NULL);
/* For an ATAPI device, first try an ATAPI SRST. */
if (drive->media != ide_disk && !do_not_try_atapi) {
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 41d46dbe6c24..16a143133f93 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -164,8 +164,7 @@ u8 ide_rate_filter (u8 mode, u8 speed)
// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed);
/* So that we remember to update this if new modes appear */
- if (mode > 4)
- BUG();
+ BUG_ON(mode > 4);
return min(speed, speed_max[mode]);
#else /* !CONFIG_BLK_DEV_IDEDMA */
return min(speed, (u8)XFER_PIO_4);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 9233b8109a0f..a839b2a8f6f4 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -196,8 +196,7 @@ ide_startstop_t set_geometry_intr (ide_drive_t *drive)
if (stat & (ERR_STAT|DRQ_STAT))
return ide_error(drive, "set_geometry_intr", stat);
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
return ide_started;
}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 3fdab563fec2..59fe358048b3 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -726,6 +726,7 @@ void ide_setup_ports ( hw_regs_t *hw,
{
int i;
+ memset(hw, 0, sizeof(hw_regs_t));
for (i = 0; i < IDE_NR_PORTS; i++) {
if (offsets[i] == -1) {
switch(i) {
@@ -1225,7 +1226,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t state)
memset(&args, 0, sizeof(args));
rq.flags = REQ_PM_SUSPEND;
rq.special = &args;
- rq.pm = &rqpm;
+ rq.end_io_data = &rqpm;
rqpm.pm_step = ide_pm_state_start_suspend;
rqpm.pm_state = state.event;
@@ -1244,7 +1245,7 @@ static int generic_ide_resume(struct device *dev)
memset(&args, 0, sizeof(args));
rq.flags = REQ_PM_RESUME;
rq.special = &args;
- rq.pm = &rqpm;
+ rq.end_io_data = &rqpm;
rqpm.pm_step = ide_pm_state_start_resume;
rqpm.pm_state = PM_EVENT_ON;
@@ -1366,8 +1367,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
ide_abort(drive, "drive reset");
- if(HWGROUP(drive)->handler)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler);
/* Ensure nothing gets queued after we
drop the lock. Reset will clear the busy */
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 2a78b792f7fb..434a94faa3b7 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -80,6 +80,7 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
{
int i;
+ memset(hw, 0, sizeof(hw_regs_t));
for (i = 0; i < IDE_NR_PORTS; i++) {
/* BIG FAT WARNING:
assumption: only DATA port is ever used in 16 bit mode */
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 27c9eb989a9a..e125032bb403 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -723,6 +723,12 @@ static ide_pci_device_t sgiioc4_chipsets[] __devinitdata = {
int
ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
+ /* PCI-RT does not bring out IDE connection.
+ * Do not attach to this particular IOC4.
+ */
+ if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
+ return 0;
+
return pci_init_sgiioc4(idd->idd_pdev,
&sgiioc4_chipsets[idd->idd_pci_id->driver_data]);
}
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index c26c8ca90dd4..fe80295974e1 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -183,8 +183,7 @@ static void trm290_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
ide_hwif_t *hwif = HWIF(drive);
- if (HWGROUP(drive)->handler != NULL) /* paranoia check */
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL); /* paranoia check */
ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL);
/* issue cmd to drive */
hwif->OUTB(command, IDE_COMMAND_REG);
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 186737539cf5..79b81be67975 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support"
config IEEE1394
tristate "IEEE 1394 (FireWire) support"
- depends on PCI || BROKEN
+ depends on (PCI || BROKEN) && (BROKEN || !FRV)
select NET
help
IEEE 1394 describes a high performance serial bus, which is also
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5ec2d49e9bb6..e57d3c50f75f 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -821,11 +821,12 @@ static void ib_uverbs_remove_one(struct ib_device *device)
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
}
-static struct super_block *uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
- INFINIBANDEVENTFS_MAGIC);
+ INFINIBANDEVENTFS_MAGIC, mnt);
}
static struct file_system_type uverbs_event_fs = {
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index e274120567e1..63de3046aff3 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -542,13 +542,14 @@ bail:
return ret;
}
-static struct super_block *ipathfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int ipathfs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
{
- ipath_super = get_sb_single(fs_type, flags, data,
- ipathfs_fill_super);
- return ipath_super;
+ int ret = get_sb_single(fs_type, flags, data,
+ ipathfs_fill_super, mnt);
+ if (ret >= 0)
+ ipath_super = mnt->mnt_sb;
+ return ret;
}
static void ipathfs_kill_super(struct super_block *s)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index ba325f16d077..5f561fce32d8 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -82,7 +82,7 @@ static int evdev_fasync(int fd, struct file *file, int on)
return retval < 0 ? retval : 0;
}
-static int evdev_flush(struct file * file)
+static int evdev_flush(struct file * file, fl_owner_t id)
{
struct evdev_list *list = file->private_data;
if (!list->evdev->exist) return -ENODEV;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 4c8fb1f8631f..f1f9db9d282c 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -36,6 +36,7 @@
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/keyboard.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
@@ -45,7 +46,7 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
-static unsigned char amikbd_keycode[0x78] = {
+static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
[2] = KEY_2,
@@ -170,12 +171,9 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy, struct pt_regs *fp)
scancode >>= 1;
if (scancode < 0x78) { /* scancodes < 0x78 are keys */
-
- scancode = amikbd_keycode[scancode];
-
input_regs(amikbd_dev, fp);
- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
+ if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
input_report_key(amikbd_dev, scancode, 1);
input_report_key(amikbd_dev, scancode, 0);
} else {
@@ -191,7 +189,7 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy, struct pt_regs *fp)
static int __init amikbd_init(void)
{
- int i;
+ int i, j;
if (!AMIGAHW_PRESENT(AMI_KEYBOARD))
return -EIO;
@@ -214,14 +212,26 @@ static int __init amikbd_init(void)
amikbd_dev->id.version = 0x0100;
amikbd_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
- amikbd_dev->keycode = amikbd_keycode;
- amikbd_dev->keycodesize = sizeof(unsigned char);
- amikbd_dev->keycodemax = ARRAY_SIZE(amikbd_keycode);
for (i = 0; i < 0x78; i++)
- if (amikbd_keycode[i])
- set_bit(amikbd_keycode[i], amikbd_dev->keybit);
-
+ set_bit(i, amikbd_dev->keybit);
+
+ for (i = 0; i < MAX_NR_KEYMAPS; i++) {
+ static u_short temp_map[NR_KEYS] __initdata;
+ if (!key_maps[i])
+ continue;
+ memset(temp_map, 0, sizeof(temp_map));
+ for (j = 0; j < 0x78; j++) {
+ if (!amikbd_keycode[j])
+ continue;
+ temp_map[j] = key_maps[i][amikbd_keycode[j]];
+ }
+ for (j = 0; j < NR_KEYS; j++) {
+ if (!temp_map[j])
+ temp_map[j] = 0xf200;
+ }
+ memcpy(key_maps[i], temp_map, sizeof(temp_map));
+ }
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", amikbd_interrupt);
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 0a37aded4b54..9ea6bd0ddc35 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -121,10 +121,10 @@ fail:
return -ENOMEM;
}
-static struct super_block *capifs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int capifs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, capifs_fill_super);
+ return get_sb_single(fs_type, flags, data, capifs_fill_super, mnt);
}
static struct file_system_type capifs_fs_type = {
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 22759c01746a..81accdf35168 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1177,9 +1177,8 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t * off
goto out;
}
chidx = isdn_minor2chan(minor);
- while (isdn_writebuf_stub(drvidx, chidx, buf, count) != count)
+ while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0)
interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]);
- retval = count;
goto out;
}
if (minor <= ISDN_MINOR_CTRLMAX) {
@@ -1951,9 +1950,10 @@ isdn_writebuf_stub(int drvidx, int chan, const u_char __user * buf, int len)
struct sk_buff *skb = alloc_skb(hl + len, GFP_ATOMIC);
if (!skb)
- return 0;
+ return -ENOMEM;
skb_reserve(skb, hl);
- copy_from_user(skb_put(skb, len), buf, len);
+ if (copy_from_user(skb_put(skb, len), buf, len))
+ return -EFAULT;
ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, 1, skb);
if (ret <= 0)
dev_kfree_skb(skb);
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
index f4f71226a078..57c4ab96d136 100644
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -97,6 +97,7 @@ int sc_ioctl(int card, scs_ioctl *data)
case SCIOCSTART:
{
+ kfree(rcvmsg);
pr_debug("%s: SCIOSTART: ioctl received\n",
sc_adapter[card]->devicename);
if(sc_adapter[card]->EngineUp) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 626506234b76..f573d5af0b1f 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -63,6 +63,12 @@ config LEDS_S3C24XX
This option enables support for LEDs connected to GPIO lines
on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
+config LEDS_AMS_DELTA
+ tristate "LED Support for the Amstrad Delta (E3)"
+ depends on LEDS_CLASS && MACH_AMS_DELTA
+ help
+ This option enables support for the LEDs on Amstrad Delta (E3).
+
comment "LED Triggers"
config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 40f042633bf5..dcea1001faa4 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
+obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
new file mode 100644
index 000000000000..e9f06116c4d7
--- /dev/null
+++ b/drivers/leds/leds-ams-delta.c
@@ -0,0 +1,162 @@
+/*
+ * LEDs driver for Amstrad Delta (E3)
+ *
+ * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <asm/arch/board-ams-delta.h>
+
+/*
+ * Our context
+ */
+struct ams_delta_led {
+ struct led_classdev cdev;
+ u8 bitmask;
+};
+
+static void ams_delta_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct ams_delta_led *led_dev =
+ container_of(led_cdev, struct ams_delta_led, cdev);
+
+ if (value)
+ ams_delta_latch1_write(led_dev->bitmask, led_dev->bitmask);
+ else
+ ams_delta_latch1_write(led_dev->bitmask, 0);
+}
+
+static struct ams_delta_led ams_delta_leds[] = {
+ {
+ .cdev = {
+ .name = "ams-delta:camera",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_CAMERA,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:advert",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_ADVERT,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:email",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_EMAIL,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:handsfree",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_HANDSFREE,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:voicemail",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_VOICEMAIL,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:voice",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_VOICE,
+ },
+};
+
+#ifdef CONFIG_PM
+static int ams_delta_led_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
+ led_classdev_suspend(&ams_delta_leds[i].cdev);
+
+ return 0;
+}
+
+static int ams_delta_led_resume(struct platform_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
+ led_classdev_resume(&ams_delta_leds[i].cdev);
+
+ return 0;
+}
+#else
+#define ams_delta_led_suspend NULL
+#define ams_delta_led_resume NULL
+#endif
+
+static int ams_delta_led_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+
+ for (i = ret = 0; ret >= 0 && i < ARRAY_SIZE(ams_delta_leds); i++) {
+ ret = led_classdev_register(&pdev->dev,
+ &ams_delta_leds[i].cdev);
+ }
+
+ if (ret < 0 && i > 1) {
+ for (i = i - 2; i >= 0; i--)
+ led_classdev_unregister(&ams_delta_leds[i].cdev);
+ }
+
+ return ret;
+}
+
+static int ams_delta_led_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ams_delta_leds) - 1; i >= 0; i--)
+ led_classdev_unregister(&ams_delta_leds[i].cdev);
+
+ return 0;
+}
+
+static struct platform_driver ams_delta_led_driver = {
+ .probe = ams_delta_led_probe,
+ .remove = ams_delta_led_remove,
+ .suspend = ams_delta_led_suspend,
+ .resume = ams_delta_led_resume,
+ .driver = {
+ .name = "ams-delta-led",
+ },
+};
+
+static int __init ams_delta_led_init(void)
+{
+ return platform_driver_register(&ams_delta_led_driver);
+}
+
+static void __exit ams_delta_led_exit(void)
+{
+ platform_driver_unregister(&ams_delta_led_driver);
+}
+
+module_init(ams_delta_led_init);
+module_exit(ams_delta_led_exit);
+
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Amstrad Delta LED driver");
+MODULE_LICENSE("GPL");
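
The new driver registers by name on the platform bus, so it only binds once board code provides a matching device. A sketch of what that registration could look like (the board support is not part of this diff, so the function name here is illustrative):

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device ams_delta_led_device = {
	.name	= "ams-delta-led",	/* must match ams_delta_led_driver.driver.name */
	.id	= -1,
};

static int __init board_register_led_device(void)
{
	return platform_device_register(&ams_delta_led_device);
}
device_initcall(board_register_led_device);

Once bound, each LED is exposed by the LED class core under /sys/class/leds/<name>/brightness.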
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 12ad462737ba..ccf5df44cde4 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -171,6 +171,7 @@ config THERM_PM72
config WINDFARM
tristate "New PowerMac thermal control infrastructure"
+ depends on PPC
config WINDFARM_PM81
tristate "Support for thermal management on iMac G5"
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 0b5ff553e39a..c63d4e7984be 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2268,7 +2268,7 @@ static int powerbook_sleep_grackle(void)
_set_L2CR(save_l2cr);
/* Restore userland MMU context */
- set_context(current->active_mm->context, current->active_mm->pgd);
+ set_context(current->active_mm->context.id, current->active_mm->pgd);
/* Power things up */
pmu_unlock();
@@ -2366,7 +2366,7 @@ powerbook_sleep_Core99(void)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
- set_context(current->active_mm->context, current->active_mm->pgd);
+ set_context(current->active_mm->context.id, current->active_mm->pgd);
/* Tell PMU we are ready */
pmu_unlock();
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 51c63c0cf1c9..926576156578 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -139,15 +139,14 @@ int __init raid6_select_algo(void)
}
}
- if ( best )
+ if (best) {
printk("raid6: using algorithm %s (%ld MB/s)\n",
best->name,
(bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- else
+ raid6_call = *best;
+ } else
printk("raid6: Yikes! No algorithm found!\n");
- raid6_call = *best;
-
free_pages((unsigned long)syndromes, 1);
return best ? 0 : -EINVAL;
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 26a230b6ff80..4a35caff5d02 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -90,10 +90,11 @@ static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root);
static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent);
-static struct super_block *ibmasmfs_get_super(struct file_system_type *fst,
- int flags, const char *name, void *data)
+static int ibmasmfs_get_super(struct file_system_type *fst,
+ int flags, const char *name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_single(fst, flags, data, ibmasmfs_fill_super);
+ return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt);
}
static struct super_operations ibmasmfs_s_ops = {
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 88f0eef9cf33..3228516b7d19 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -81,13 +81,6 @@
#undef SUPPORT_4WIRE
-#ifdef CONFIG_MMC_DEBUG
-#define DBG(fmt...) \
- printk(fmt)
-#else
-#define DBG(fmt...) do { } while (0)
-#endif
-
static struct clk *mci_clk;
#define FL_SENT_COMMAND (1 << 0)
@@ -202,50 +195,50 @@ static void at91mci_pre_dma_read(struct at91mci_host *host)
struct mmc_command *cmd;
struct mmc_data *data;
- DBG("pre dma read\n");
+ pr_debug("pre dma read\n");
cmd = host->cmd;
if (!cmd) {
- DBG("no command\n");
+ pr_debug("no command\n");
return;
}
data = cmd->data;
if (!data) {
- DBG("no data\n");
+ pr_debug("no data\n");
return;
}
for (i = 0; i < 2; i++) {
/* nothing left to transfer */
if (host->transfer_index >= data->sg_len) {
- DBG("Nothing left to transfer (index = %d)\n", host->transfer_index);
+ pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
break;
}
/* Check to see if this needs filling */
if (i == 0) {
if (at91_mci_read(AT91_PDC_RCR) != 0) {
- DBG("Transfer active in current\n");
+ pr_debug("Transfer active in current\n");
continue;
}
}
else {
if (at91_mci_read(AT91_PDC_RNCR) != 0) {
- DBG("Transfer active in next\n");
+ pr_debug("Transfer active in next\n");
continue;
}
}
/* Setup the next transfer */
- DBG("Using transfer index %d\n", host->transfer_index);
+ pr_debug("Using transfer index %d\n", host->transfer_index);
sg = &data->sg[host->transfer_index++];
- DBG("sg = %p\n", sg);
+ pr_debug("sg = %p\n", sg);
sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
- DBG("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
+ pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
if (i == 0) {
at91_mci_write(AT91_PDC_RPR, sg->dma_address);
@@ -257,7 +250,7 @@ static void at91mci_pre_dma_read(struct at91mci_host *host)
}
}
- DBG("pre dma read done\n");
+ pr_debug("pre dma read done\n");
}
/*
@@ -268,17 +261,17 @@ static void at91mci_post_dma_read(struct at91mci_host *host)
struct mmc_command *cmd;
struct mmc_data *data;
- DBG("post dma read\n");
+ pr_debug("post dma read\n");
cmd = host->cmd;
if (!cmd) {
- DBG("no command\n");
+ pr_debug("no command\n");
return;
}
data = cmd->data;
if (!data) {
- DBG("no data\n");
+ pr_debug("no data\n");
return;
}
@@ -289,17 +282,17 @@ static void at91mci_post_dma_read(struct at91mci_host *host)
struct scatterlist *sg;
- DBG("finishing index %d\n", host->in_use_index);
+ pr_debug("finishing index %d\n", host->in_use_index);
sg = &data->sg[host->in_use_index++];
- DBG("Unmapping page %08X\n", sg->dma_address);
+ pr_debug("Unmapping page %08X\n", sg->dma_address);
dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
/* Swap the contents of the buffer */
buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
- DBG("buffer = %p, length = %d\n", buffer, sg->length);
+ pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
data->bytes_xfered += sg->length;
@@ -320,7 +313,7 @@ static void at91mci_post_dma_read(struct at91mci_host *host)
at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
}
- DBG("post dma read done\n");
+ pr_debug("post dma read done\n");
}
/*
@@ -331,7 +324,7 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host)
struct mmc_command *cmd;
struct mmc_data *data;
- DBG("Handling the transmit\n");
+ pr_debug("Handling the transmit\n");
/* Disable the transfer */
at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
@@ -387,12 +380,12 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
/* Not sure if this is needed */
#if 0
if ((at91_mci_read(AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
- DBG("Clearing timeout\n");
+ pr_debug("Clearing timeout\n");
at91_mci_write(AT91_MCI_ARGR, 0);
at91_mci_write(AT91_MCI_CMDR, AT91_MCI_OPDCMD);
while (!(at91_mci_read(AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
/* spin */
- DBG("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR));
+ pr_debug("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR));
}
}
#endif
@@ -411,7 +404,7 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
}
if (data) {
- block_length = 1 << data->blksz_bits;
+ block_length = data->blksz;
blocks = data->blocks;
/* always set data start - also set direction flag for read */
@@ -439,7 +432,7 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
/*
* Set the arguments and send the command
*/
- DBG("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n",
+ pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n",
cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(AT91_MCI_MR));
if (!data) {
@@ -491,7 +484,7 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
at91mci_sg_to_dma(host, data);
- DBG("Transmitting %d bytes\n", host->total_length);
+ pr_debug("Transmitting %d bytes\n", host->total_length);
at91_mci_write(AT91_PDC_TPR, host->physical_address);
at91_mci_write(AT91_PDC_TCR, host->total_length / 4);
@@ -525,7 +518,7 @@ static void at91mci_process_command(struct at91mci_host *host, struct mmc_comman
ier = at91_mci_send_command(host, cmd);
- DBG("setting ier to %08X\n", ier);
+ pr_debug("setting ier to %08X\n", ier);
/* Stop on errors or the required value */
at91_mci_write(AT91_MCI_IER, 0xffff0000 | ier);
@@ -570,7 +563,7 @@ static void at91mci_completed_command(struct at91mci_host *host)
status = at91_mci_read(AT91_MCI_SR);
- DBG("Status = %08X [%08X %08X %08X %08X]\n",
+ pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
@@ -590,7 +583,7 @@ static void at91mci_completed_command(struct at91mci_host *host)
else
cmd->error = MMC_ERR_FAILED;
- DBG("Error detected and set to %d (cmd = %d, retries = %d)\n",
+ pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
cmd->error, cmd->opcode, cmd->retries);
}
}
@@ -621,10 +614,7 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct at91mci_host *host = mmc_priv(mmc);
unsigned long at91_master_clock = clk_get_rate(mci_clk);
- if (host)
- host->bus_mode = ios->bus_mode;
- else
- printk("MMC: No host for bus_mode\n");
+ host->bus_mode = ios->bus_mode;
if (ios->clock == 0) {
/* Disable the MCI controller */
@@ -640,15 +630,15 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
clkdiv = (at91_master_clock / ios->clock) / 2;
- DBG("clkdiv = %d. mcck = %ld\n", clkdiv,
+ pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
at91_master_clock / (2 * (clkdiv + 1)));
}
if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
- DBG("MMC: Setting controller bus width to 4\n");
+ pr_debug("MMC: Setting controller bus width to 4\n");
at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
}
else {
- DBG("MMC: Setting controller bus width to 1\n");
+ pr_debug("MMC: Setting controller bus width to 1\n");
at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
}
@@ -656,7 +646,7 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
at91_mci_write(AT91_MCI_MR, (at91_mci_read(AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
/* maybe switch power to the card */
- if (host && host->board->vcc_pin) {
+ if (host->board->vcc_pin) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
at91_set_gpio_output(host->board->vcc_pin, 0);
@@ -679,11 +669,8 @@ static irqreturn_t at91_mci_irq(int irq, void *devid, struct pt_regs *regs)
unsigned int int_status;
- if (host == NULL)
- return IRQ_HANDLED;
-
int_status = at91_mci_read(AT91_MCI_SR);
- DBG("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR),
+ pr_debug("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR),
int_status & at91_mci_read(AT91_MCI_IMR));
if ((int_status & at91_mci_read(AT91_MCI_IMR)) & 0xffff0000)
@@ -692,75 +679,75 @@ static irqreturn_t at91_mci_irq(int irq, void *devid, struct pt_regs *regs)
int_status &= at91_mci_read(AT91_MCI_IMR);
if (int_status & AT91_MCI_UNRE)
- DBG("MMC: Underrun error\n");
+ pr_debug("MMC: Underrun error\n");
if (int_status & AT91_MCI_OVRE)
- DBG("MMC: Overrun error\n");
+ pr_debug("MMC: Overrun error\n");
if (int_status & AT91_MCI_DTOE)
- DBG("MMC: Data timeout\n");
+ pr_debug("MMC: Data timeout\n");
if (int_status & AT91_MCI_DCRCE)
- DBG("MMC: CRC error in data\n");
+ pr_debug("MMC: CRC error in data\n");
if (int_status & AT91_MCI_RTOE)
- DBG("MMC: Response timeout\n");
+ pr_debug("MMC: Response timeout\n");
if (int_status & AT91_MCI_RENDE)
- DBG("MMC: Response end bit error\n");
+ pr_debug("MMC: Response end bit error\n");
if (int_status & AT91_MCI_RCRCE)
- DBG("MMC: Response CRC error\n");
+ pr_debug("MMC: Response CRC error\n");
if (int_status & AT91_MCI_RDIRE)
- DBG("MMC: Response direction error\n");
+ pr_debug("MMC: Response direction error\n");
if (int_status & AT91_MCI_RINDE)
- DBG("MMC: Response index error\n");
+ pr_debug("MMC: Response index error\n");
/* Only continue processing if no errors */
if (!completed) {
if (int_status & AT91_MCI_TXBUFE) {
- DBG("TX buffer empty\n");
+ pr_debug("TX buffer empty\n");
at91_mci_handle_transmitted(host);
}
if (int_status & AT91_MCI_RXBUFF) {
- DBG("RX buffer full\n");
+ pr_debug("RX buffer full\n");
at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
}
if (int_status & AT91_MCI_ENDTX) {
- DBG("Transmit has ended\n");
+ pr_debug("Transmit has ended\n");
}
if (int_status & AT91_MCI_ENDRX) {
- DBG("Receive has ended\n");
+ pr_debug("Receive has ended\n");
at91mci_post_dma_read(host);
}
if (int_status & AT91_MCI_NOTBUSY) {
- DBG("Card is ready\n");
+ pr_debug("Card is ready\n");
at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
}
if (int_status & AT91_MCI_DTIP) {
- DBG("Data transfer in progress\n");
+ pr_debug("Data transfer in progress\n");
}
if (int_status & AT91_MCI_BLKE) {
- DBG("Block transfer has ended\n");
+ pr_debug("Block transfer has ended\n");
}
if (int_status & AT91_MCI_TXRDY) {
- DBG("Ready to transmit\n");
+ pr_debug("Ready to transmit\n");
}
if (int_status & AT91_MCI_RXRDY) {
- DBG("Ready to receive\n");
+ pr_debug("Ready to receive\n");
}
if (int_status & AT91_MCI_CMDRDY) {
- DBG("Command ready\n");
+ pr_debug("Command ready\n");
completed = 1;
}
}
at91_mci_write(AT91_MCI_IDR, int_status);
if (completed) {
- DBG("Completed command\n");
+ pr_debug("Completed command\n");
at91_mci_write(AT91_MCI_IDR, 0xffffffff);
at91mci_completed_command(host);
}
@@ -779,10 +766,10 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host, struct pt_regs *regs)
*/
if (present != host->present) {
host->present = present;
- DBG("%s: card %s\n", mmc_hostname(host->mmc),
+ pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
present ? "insert" : "remove");
if (!present) {
- DBG("****** Resetting SD-card bus width ******\n");
+ pr_debug("****** Resetting SD-card bus width ******\n");
at91_mci_write(AT91_MCI_SDCR, 0);
}
mmc_detect_change(host->mmc, msecs_to_jiffies(100));
@@ -822,13 +809,13 @@ static int at91_mci_probe(struct platform_device *pdev)
struct at91mci_host *host;
int ret;
- DBG("Probe MCI devices\n");
+ pr_debug("Probe MCI devices\n");
at91_mci_disable();
at91_mci_enable();
mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
if (!mmc) {
- DBG("Failed to allocate mmc host\n");
+ pr_debug("Failed to allocate mmc host\n");
return -ENOMEM;
}
@@ -854,8 +841,9 @@ static int at91_mci_probe(struct platform_device *pdev)
* Get Clock
*/
mci_clk = clk_get(&pdev->dev, "mci_clk");
- if (!mci_clk) {
+ if (IS_ERR(mci_clk)) {
printk(KERN_ERR "AT91 MMC: no clock defined.\n");
+ mmc_free_host(mmc);
return -ENODEV;
}
clk_enable(mci_clk); /* Enable the peripheral clock */
@@ -865,7 +853,10 @@ static int at91_mci_probe(struct platform_device *pdev)
*/
ret = request_irq(AT91_ID_MCI, at91_mci_irq, SA_SHIRQ, DRIVER_NAME, host);
if (ret) {
- DBG("Failed to request MCI interrupt\n");
+ printk(KERN_ERR "Failed to request MCI interrupt\n");
+ clk_disable(mci_clk);
+ clk_put(mci_clk);
+ mmc_free_host(mmc);
return ret;
}
@@ -886,12 +877,12 @@ static int at91_mci_probe(struct platform_device *pdev)
*/
if (host->board->det_pin) {
ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
- SA_SAMPLE_RANDOM, DRIVER_NAME, host);
+ 0, DRIVER_NAME, host);
if (ret)
- DBG("couldn't allocate MMC detect irq\n");
+ printk(KERN_ERR "couldn't allocate MMC detect irq\n");
}
- DBG(KERN_INFO "Added MCI driver\n");
+ pr_debug("Added MCI driver\n");
return 0;
}
@@ -924,7 +915,7 @@ static int at91_mci_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- DBG("Removed\n");
+ pr_debug("MCI Removed\n");
return 0;
}
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index a4eb1d0e7a71..5c62f4e6ad06 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -228,7 +228,7 @@ static int imxmci_busy_wait_for_status(struct imxmci_host *host,
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
- unsigned int blksz = 1 << data->blksz_bits;
+ unsigned int blksz = data->blksz;
unsigned int datasz = nob * blksz;
int i;
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index becb3c68c34d..c25244b3657b 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -584,10 +584,10 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
int sync_dev = 0;
data_addr = io_v2p((u32) host->base) + OMAP_MMC_REG_DATA;
- frame = 1 << data->blksz_bits;
+ frame = data->blksz;
count = sg_dma_len(sg);
- if ((data->blocks == 1) && (count > (1 << data->blksz_bits)))
+ if ((data->blocks == 1) && (count > data->blksz))
count = frame;
host->dma_len = count;
@@ -776,7 +776,7 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
}
- block_size = 1 << data->blksz_bits;
+ block_size = data->blksz;
OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 6bfcdbc7491e..8e9100bd57ef 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -268,7 +268,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
}
DBG("blksz %04x blks %04x flags %08x\n",
- 1 << data->blksz_bits, data->blocks, data->flags);
+ data->blksz, data->blocks, data->flags);
DBG("tsac %d ms nsac %d clk\n",
data->timeout_ns / 1000000, data->timeout_clks);
@@ -282,7 +282,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
- writew(1 << data->blksz_bits, host->ioaddr + SDHCI_BLOCK_SIZE);
+ writew(data->blksz, host->ioaddr + SDHCI_BLOCK_SIZE);
writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
if (host->flags & SDHCI_USE_DMA) {
@@ -294,7 +294,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
} else {
- host->size = (1 << data->blksz_bits) * data->blocks;
+ host->size = data->blksz * data->blocks;
host->cur_sg = data->sg;
host->num_sg = data->sg_len;
@@ -335,7 +335,7 @@ static void sdhci_finish_data(struct sdhci_host *host)
blocks = 0;
else
blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
- data->bytes_xfered = (1 << data->blksz_bits) * (data->blocks - blocks);
+ data->bytes_xfered = data->blksz * (data->blocks - blocks);
if ((data->error == MMC_ERR_NONE) && blocks) {
printk(KERN_ERR "%s: Controller signalled completion even "
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 1b1cb0026072..157eda573925 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1031,8 +1031,7 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
return 1;
}
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL) {
+ if (skb_padto(skb, ETH_ZLEN)) {
netif_wake_queue(dev);
return 0;
}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 46d8c01437e9..0cdc830449d8 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -401,6 +401,11 @@ static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
+static int cp_get_eeprom_len(struct net_device *dev);
+static int cp_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+static int cp_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data);
static struct pci_device_id cp_pci_tbl[] = {
{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
@@ -792,7 +797,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
entry = cp->tx_head;
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
if (dev->features & NETIF_F_TSO)
- mss = skb_shinfo(skb)->tso_size;
+ mss = skb_shinfo(skb)->gso_size;
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
@@ -1577,6 +1582,9 @@ static struct ethtool_ops cp_ethtool_ops = {
.get_strings = cp_get_strings,
.get_ethtool_stats = cp_get_ethtool_stats,
.get_perm_addr = ethtool_op_get_perm_addr,
+ .get_eeprom_len = cp_get_eeprom_len,
+ .get_eeprom = cp_get_eeprom,
+ .set_eeprom = cp_set_eeprom,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1612,24 +1620,32 @@ static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
#define eeprom_delay() readl(ee_addr)
/* The EEPROM commands include the always-set leading bit. */
+#define EE_EXTEND_CMD (4)
#define EE_WRITE_CMD (5)
#define EE_READ_CMD (6)
#define EE_ERASE_CMD (7)
-static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
-{
- int i;
- unsigned retval = 0;
- void __iomem *ee_addr = ioaddr + Cfg9346;
- int read_cmd = location | (EE_READ_CMD << addr_len);
+#define EE_EWDS_ADDR (0)
+#define EE_WRAL_ADDR (1)
+#define EE_ERAL_ADDR (2)
+#define EE_EWEN_ADDR (3)
+
+#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
+static void eeprom_cmd_start(void __iomem *ee_addr)
+{
writeb (EE_ENB & ~EE_CS, ee_addr);
writeb (EE_ENB, ee_addr);
eeprom_delay ();
+}
- /* Shift the read command bits out. */
- for (i = 4 + addr_len; i >= 0; i--) {
- int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
+{
+ int i;
+
+ /* Shift the command bits out. */
+ for (i = cmd_len - 1; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
writeb (EE_ENB | dataval, ee_addr);
eeprom_delay ();
writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
@@ -1637,6 +1653,33 @@ static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
}
writeb (EE_ENB, ee_addr);
eeprom_delay ();
+}
+
+static void eeprom_cmd_end(void __iomem *ee_addr)
+{
+ writeb (~EE_CS, ee_addr);
+ eeprom_delay ();
+}
+
+static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
+ int addr_len)
+{
+ int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
+
+ eeprom_cmd_start(ee_addr);
+ eeprom_cmd(ee_addr, cmd, 3 + addr_len);
+ eeprom_cmd_end(ee_addr);
+}
+
+static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
+{
+ int i;
+ u16 retval = 0;
+ void __iomem *ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ eeprom_cmd_start(ee_addr);
+ eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
for (i = 16; i > 0; i--) {
writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
@@ -1648,13 +1691,125 @@ static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
eeprom_delay ();
}
- /* Terminate the EEPROM access. */
- writeb (~EE_CS, ee_addr);
- eeprom_delay ();
+ eeprom_cmd_end(ee_addr);
return retval;
}
+static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
+ int addr_len)
+{
+ int i;
+ void __iomem *ee_addr = ioaddr + Cfg9346;
+ int write_cmd = location | (EE_WRITE_CMD << addr_len);
+
+ eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
+
+ eeprom_cmd_start(ee_addr);
+ eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
+ eeprom_cmd(ee_addr, val, 16);
+ eeprom_cmd_end(ee_addr);
+
+ eeprom_cmd_start(ee_addr);
+ for (i = 0; i < 20000; i++)
+ if (readb(ee_addr) & EE_DATA_READ)
+ break;
+ eeprom_cmd_end(ee_addr);
+
+ eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
+}
+
+static int cp_get_eeprom_len(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int size;
+
+ spin_lock_irq(&cp->lock);
+ size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
+ spin_unlock_irq(&cp->lock);
+
+ return size;
+}
+
+static int cp_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned int addr_len;
+ u16 val;
+ u32 offset = eeprom->offset >> 1;
+ u32 len = eeprom->len;
+ u32 i = 0;
+
+ eeprom->magic = CP_EEPROM_MAGIC;
+
+ spin_lock_irq(&cp->lock);
+
+ addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
+
+ if (eeprom->offset & 1) {
+ val = read_eeprom(cp->regs, offset, addr_len);
+ data[i++] = (u8)(val >> 8);
+ offset++;
+ }
+
+ while (i < len - 1) {
+ val = read_eeprom(cp->regs, offset, addr_len);
+ data[i++] = (u8)val;
+ data[i++] = (u8)(val >> 8);
+ offset++;
+ }
+
+ if (i < len) {
+ val = read_eeprom(cp->regs, offset, addr_len);
+ data[i] = (u8)val;
+ }
+
+ spin_unlock_irq(&cp->lock);
+ return 0;
+}
+
+static int cp_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned int addr_len;
+ u16 val;
+ u32 offset = eeprom->offset >> 1;
+ u32 len = eeprom->len;
+ u32 i = 0;
+
+ if (eeprom->magic != CP_EEPROM_MAGIC)
+ return -EINVAL;
+
+ spin_lock_irq(&cp->lock);
+
+ addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
+
+ if (eeprom->offset & 1) {
+ val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
+ val |= (u16)data[i++] << 8;
+ write_eeprom(cp->regs, offset, val, addr_len);
+ offset++;
+ }
+
+ while (i < len - 1) {
+ val = (u16)data[i++];
+ val |= (u16)data[i++] << 8;
+ write_eeprom(cp->regs, offset, val, addr_len);
+ offset++;
+ }
+
+ if (i < len) {
+ val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
+ val |= (u16)data[i];
+ write_eeprom(cp->regs, offset, val, addr_len);
+ }
+
+ spin_unlock_irq(&cp->lock);
+ return 0;
+}
+
/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
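The 8139cp block above splits the old read_eeprom() into eeprom_cmd_start(), eeprom_cmd() and eeprom_cmd_end(), then builds write_eeprom() and the ethtool get/set EEPROM hooks on top of them. The command word they bit-bang follows the usual serial-EEPROM framing: a start bit and opcode sit above the address bits and are shifted out MSB first. A standalone illustration with example values:

/* Illustrative only: how read_eeprom() above composes its command word.
 * EE_READ_CMD is 6 (start bit plus opcode); addr_len is 6 for the small
 * part and 8 when the 0x8129 signature selects the larger one. */
#include <stdio.h>

int main(void)
{
	int addr_len = 6;			/* example: small EEPROM */
	int location = 0x12;			/* example word address */
	int read_cmd = location | (6 << addr_len);
	int cmd_len = 3 + addr_len;		/* bits shifted out by eeprom_cmd() */

	for (int i = cmd_len - 1; i >= 0; i--)	/* MSB first, as in the driver */
		putchar((read_cmd & (1 << i)) ? '1' : '0');
	putchar('\n');
	return 0;
}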
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index da0c878dcba8..8a9f7d61b9b1 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1070,8 +1070,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, (unsigned int)skb->data));
if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index f87027420081..86be96af9c8f 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -275,12 +275,14 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
int send_length = skb->len, output_page;
unsigned long flags;
+ char buf[ETH_ZLEN];
+ char *data = skb->data;
if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
- return 0;
+ memset(buf, 0, ETH_ZLEN); /* zeroing the whole buffer is more efficient than clearing just the pad bytes */
+ memcpy(buf, data, skb->len);
send_length = ETH_ZLEN;
+ data = buf;
}
/* Mask interrupts from the ethercard.
@@ -347,7 +349,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
* trigger the send later, upon receiving a Tx done interrupt.
*/
- ei_block_output(dev, send_length, skb->data, output_page);
+ ei_block_output(dev, send_length, data, output_page);
if (! ei_local->txing)
{
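Unlike the other conversions in this series, 8390.c drops skb_padto() altogether: a short frame is copied into a zeroed ETH_ZLEN-sized buffer on the stack and that buffer is passed to ei_block_output() instead. A standalone sketch of the idea; the helper name is invented for illustration:

/* Illustrative only: pad a short frame into a caller-provided buffer
 * rather than reallocating the original one. */
#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, as in the kernel */

static const unsigned char *pad_short_frame(const unsigned char *data,
					    unsigned int len,
					    unsigned char buf[ETH_ZLEN],
					    unsigned int *out_len)
{
	if (len >= ETH_ZLEN) {
		*out_len = len;
		return data;		/* long enough: transmit as-is */
	}
	memset(buf, 0, ETH_ZLEN);	/* zero the whole minimum-size frame */
	memcpy(buf, data, len);		/* then lay the real payload over it */
	*out_len = ETH_ZLEN;
	return buf;
}

int main(void)
{
	unsigned char frame[10] = { 0 }, buf[ETH_ZLEN];
	unsigned int out_len;

	pad_short_frame(frame, sizeof(frame), buf, &out_len);
	printf("padded length = %u\n", out_len);
	return 0;
}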
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c6b45a11d15..39189903e355 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -854,6 +854,17 @@ config SMC9194
<file:Documentation/networking/net-modules.txt>. The module
will be called smc9194.
+config NET_NETX
+ tristate "NetX Ethernet support"
+ select MII
+ depends on NET_ETHERNET && ARCH_NETX
+ help
+ This is support for the Hilscher netX built-in Ethernet ports.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called netx-eth.
+
config DM9000
tristate "DM9000 support"
depends on (ARM || MIPS) && NET_ETHERNET
@@ -1376,8 +1387,8 @@ config APRICOT
called apricot.
config B44
- tristate "Broadcom 4400 ethernet support (EXPERIMENTAL)"
- depends on NET_PCI && PCI && EXPERIMENTAL
+ tristate "Broadcom 4400 ethernet support"
+ depends on NET_PCI && PCI
select MII
help
If you have a network (Ethernet) controller of this type, say Y and
@@ -2190,7 +2201,7 @@ config BNX2
config SPIDER_NET
tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_CELL
+ depends on PCI && PPC_IBM_CELL_BLADE
select FW_LOADER
help
This driver supports the Gigabit Ethernet chips present on the
@@ -2198,11 +2209,11 @@ config SPIDER_NET
config GIANFAR
tristate "Gianfar Ethernet"
- depends on 85xx || 83xx
+ depends on 85xx || 83xx || PPC_86xx
select PHYLIB
help
- This driver supports the Gigabit TSEC on the MPC85xx
- family of chips, and the FEC on the 8540
+ This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
+ and MPC86xx family of chips, and the FEC on the 8540.
config GFAR_NAPI
bool "NAPI Support"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1eced3287507..c91e95126f78 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -187,6 +187,7 @@ obj-$(CONFIG_MACSONIC) += macsonic.o
obj-$(CONFIG_MACMACE) += macmace.o
obj-$(CONFIG_MAC89x0) += mac89x0.o
obj-$(CONFIG_TUN) += tun.o
+obj-$(CONFIG_NET_NETX) += netx-eth.o
obj-$(CONFIG_DL2K) += dl2k.o
obj-$(CONFIG_R8169) += r8169.o
obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 79bb56b8dcef..71165ac0257a 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -573,8 +573,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
if (len < ETH_ZLEN) {
len = ETH_ZLEN;
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
}
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index d1b6b1f794e2..a9bb7a4aff98 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -607,8 +607,7 @@ static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* FIXME: is the 79C960 new enough to do its own padding right ? */
if (skb->len < ETH_ZLEN)
{
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
len = ETH_ZLEN;
}
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 5503dc8a66e4..613005a0285d 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -43,7 +43,9 @@
#define DRV_VERSION "1.0"
static struct net_device *at91_dev;
-static struct clk *ether_clk;
+
+static struct timer_list check_timer;
+#define LINK_POLL_INTERVAL (HZ)
/* ..................................................................... */
@@ -143,7 +145,7 @@ static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int
* MAC accordingly.
* If no link or auto-negotiation is busy, then no changes are made.
*/
-static void update_linkspeed(struct net_device *dev)
+static void update_linkspeed(struct net_device *dev, int silent)
{
struct at91_private *lp = (struct at91_private *) dev->priv;
unsigned int bmsr, bmcr, lpa, mac_cfg;
@@ -151,7 +153,8 @@ static void update_linkspeed(struct net_device *dev)
if (!mii_link_ok(&lp->mii)) { /* no link */
netif_carrier_off(dev);
- printk(KERN_INFO "%s: Link down.\n", dev->name);
+ if (!silent)
+ printk(KERN_INFO "%s: Link down.\n", dev->name);
return;
}
@@ -186,7 +189,8 @@ static void update_linkspeed(struct net_device *dev)
}
at91_emac_write(AT91_EMAC_CFG, mac_cfg);
- printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
+ if (!silent)
+ printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
netif_carrier_on(dev);
}
@@ -226,7 +230,7 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id, struct pt_regs
goto done;
}
- update_linkspeed(dev);
+ update_linkspeed(dev, 0);
done:
disable_mdi();
@@ -243,14 +247,17 @@ static void enable_phyirq(struct net_device *dev)
unsigned int dsintr, irq_number;
int status;
- if (lp->phy_type == MII_RTL8201_ID) /* RTL8201 does not have an interrupt */
- return;
- if (lp->phy_type == MII_DP83847_ID) /* DP83847 does not have an interrupt */
- return;
- if (lp->phy_type == MII_AC101L_ID) /* AC101L interrupt not supported yet */
+ irq_number = lp->board_data.phy_irq_pin;
+ if (!irq_number) {
+ /*
+ * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
+ * or board does not have it connected.
+ */
+ check_timer.expires = jiffies + LINK_POLL_INTERVAL;
+ add_timer(&check_timer);
return;
+ }
- irq_number = lp->board_data.phy_irq_pin;
status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
if (status) {
printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
@@ -292,12 +299,11 @@ static void disable_phyirq(struct net_device *dev)
unsigned int dsintr;
unsigned int irq_number;
- if (lp->phy_type == MII_RTL8201_ID) /* RTL8201 does not have an interrupt */
- return;
- if (lp->phy_type == MII_DP83847_ID) /* DP83847 does not have an interrupt */
- return;
- if (lp->phy_type == MII_AC101L_ID) /* AC101L interrupt not supported yet */
+ irq_number = lp->board_data.phy_irq_pin;
+ if (!irq_number) {
+ del_timer_sync(&check_timer);
return;
+ }
spin_lock_irq(&lp->lock);
enable_mdi();
@@ -326,7 +332,6 @@ static void disable_phyirq(struct net_device *dev)
disable_mdi();
spin_unlock_irq(&lp->lock);
- irq_number = lp->board_data.phy_irq_pin;
free_irq(irq_number, dev); /* Free interrupt handler */
}
@@ -355,6 +360,18 @@ static void reset_phy(struct net_device *dev)
}
#endif
+static void at91ether_check_link(unsigned long dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+
+ enable_mdi();
+ update_linkspeed(dev, 1);
+ disable_mdi();
+
+ check_timer.expires = jiffies + LINK_POLL_INTERVAL;
+ add_timer(&check_timer);
+}
+
/* ......................... ADDRESS MANAGEMENT ........................ */
/*
@@ -501,7 +518,7 @@ static int hash_get_index(__u8 *addr)
hash_index |= (bitval << j);
}
- return hash_index;
+ return hash_index;
}
/*
@@ -557,10 +574,8 @@ static void at91ether_set_rx_mode(struct net_device *dev)
at91_emac_write(AT91_EMAC_CFG, cfg);
}
-
/* ......................... ETHTOOL SUPPORT ........................... */
-
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
unsigned int value;
@@ -642,6 +657,22 @@ static struct ethtool_ops at91ether_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
+static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct at91_private *lp = (struct at91_private *) dev->priv;
+ int res;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+ res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ return res;
+}
/* ................................ MAC ................................ */
@@ -685,10 +716,10 @@ static int at91ether_open(struct net_device *dev)
struct at91_private *lp = (struct at91_private *) dev->priv;
unsigned long ctl;
- if (!is_valid_ether_addr(dev->dev_addr))
- return -EADDRNOTAVAIL;
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
- clk_enable(ether_clk); /* Re-enable Peripheral clock */
+ clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
/* Clear internal statistics */
ctl = at91_emac_read(AT91_EMAC_CTL);
@@ -708,7 +739,7 @@ static int at91ether_open(struct net_device *dev)
/* Determine current link speed */
spin_lock_irq(&lp->lock);
enable_mdi();
- update_linkspeed(dev);
+ update_linkspeed(dev, 0);
disable_mdi();
spin_unlock_irq(&lp->lock);
@@ -722,6 +753,7 @@ static int at91ether_open(struct net_device *dev)
*/
static int at91ether_close(struct net_device *dev)
{
+ struct at91_private *lp = (struct at91_private *) dev->priv;
unsigned long ctl;
/* Disable Receiver and Transmitter */
@@ -738,7 +770,7 @@ static int at91ether_close(struct net_device *dev)
netif_stop_queue(dev);
- clk_disable(ether_clk); /* Disable Peripheral clock */
+ clk_disable(lp->ether_clk); /* Disable Peripheral clock */
return 0;
}
@@ -870,7 +902,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id, struct pt_regs *re
if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
at91ether_rx(dev);
- if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
+ if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
/* The TCOM bit is set even if the transmission failed. */
if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
lp->stats.tx_errors += 1;
@@ -899,7 +931,8 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id, struct pt_regs *re
/*
* Initialize the ethernet interface
*/
-static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, struct platform_device *pdev)
+static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
+ struct platform_device *pdev, struct clk *ether_clk)
{
struct at91_eth_data *board_data = pdev->dev.platform_data;
struct net_device *dev;
@@ -933,6 +966,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
return -ENOMEM;
}
lp->board_data = *board_data;
+ lp->ether_clk = ether_clk;
platform_set_drvdata(pdev, dev);
spin_lock_init(&lp->lock);
@@ -945,6 +979,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
dev->set_multicast_list = at91ether_set_rx_mode;
dev->set_mac_address = set_mac_address;
dev->ethtool_ops = &at91ether_ethtool_ops;
+ dev->do_ioctl = at91ether_ioctl;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -975,6 +1010,9 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
lp->mii.dev = dev; /* Support for ethtool */
lp->mii.mdio_read = mdio_read;
lp->mii.mdio_write = mdio_write;
+ lp->mii.phy_id = phy_address;
+ lp->mii.phy_id_mask = 0x1f;
+ lp->mii.reg_num_mask = 0x1f;
lp->phy_type = phy_type; /* Type of PHY connected */
lp->phy_address = phy_address; /* MDI address of PHY */
@@ -992,11 +1030,18 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
/* Determine current link speed */
spin_lock_irq(&lp->lock);
enable_mdi();
- update_linkspeed(dev);
+ update_linkspeed(dev, 0);
disable_mdi();
spin_unlock_irq(&lp->lock);
netif_carrier_off(dev); /* will be enabled in open() */
+ /* If board has no PHY IRQ, use a timer to poll the PHY */
+ if (!lp->board_data.phy_irq_pin) {
+ init_timer(&check_timer);
+ check_timer.data = (unsigned long)dev;
+ check_timer.function = at91ether_check_link;
+ }
+
/* Display ethernet banner */
printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%02x:%02x:%02x:%02x:%02x:%02x)\n",
dev->name, (uint) dev->base_addr, dev->irq,
@@ -1005,7 +1050,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
- printk(KERN_INFO "%s: Davicom 9196 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
+ printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
else if (phy_type == MII_LXT971A_ID)
printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
else if (phy_type == MII_RTL8201_ID)
@@ -1031,9 +1076,10 @@ static int __init at91ether_probe(struct platform_device *pdev)
int detected = -1;
unsigned long phy_id;
unsigned short phy_address = 0;
+ struct clk *ether_clk;
ether_clk = clk_get(&pdev->dev, "ether_clk");
- if (!ether_clk) {
+ if (IS_ERR(ether_clk)) {
printk(KERN_ERR "at91_ether: no clock defined\n");
return -ENODEV;
}
@@ -1056,7 +1102,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
case MII_DP83847_ID: /* National Semiconductor DP83847: */
case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
- detected = at91ether_setup(phy_id, phy_address, pdev);
+ detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
break;
}
@@ -1075,17 +1121,61 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
unregister_netdev(at91_dev);
free_irq(at91_dev->irq, at91_dev);
dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
- clk_put(ether_clk);
+ clk_put(lp->ether_clk);
free_netdev(at91_dev);
at91_dev = NULL;
return 0;
}
+#ifdef CONFIG_PM
+
+static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct at91_private *lp = (struct at91_private *) at91_dev->priv;
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ int phy_irq = lp->board_data.phy_irq_pin;
+
+ if (netif_running(net_dev)) {
+ if (phy_irq)
+ disable_irq(phy_irq);
+
+ netif_stop_queue(net_dev);
+ netif_device_detach(net_dev);
+
+ clk_disable(lp->ether_clk);
+ }
+ return 0;
+}
+
+static int at91ether_resume(struct platform_device *pdev)
+{
+ struct at91_private *lp = (struct at91_private *) at91_dev->priv;
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ int phy_irq = lp->board_data.phy_irq_pin;
+
+ if (netif_running(net_dev)) {
+ clk_enable(lp->ether_clk);
+
+ netif_device_attach(net_dev);
+ netif_start_queue(net_dev);
+
+ if (phy_irq)
+ enable_irq(phy_irq);
+ }
+ return 0;
+}
+
+#else
+#define at91ether_suspend NULL
+#define at91ether_resume NULL
+#endif
+
static struct platform_driver at91ether_driver = {
.probe = at91ether_probe,
.remove = __devexit_p(at91ether_remove),
- /* FIXME: support suspend and resume */
+ .suspend = at91ether_suspend,
+ .resume = at91ether_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
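One detail of the at91_ether changes above is the probe fix: clk_get() reports failure through an ERR_PTR()-encoded pointer, never NULL, so the old !ether_clk test could not catch it. A hedged sketch of the corrected pattern; the function name and the choice to return PTR_ERR() are illustrative only (the driver itself returns -ENODEV):

/* Sketch of the clk API error handling the hunk above adopts. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, "ether_clk");

	if (IS_ERR(clk))		/* never NULL on failure */
		return PTR_ERR(clk);

	clk_enable(clk);		/* power the peripheral while in use */
	/* ... set up the device ... */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}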
diff --git a/drivers/net/arm/at91_ether.h b/drivers/net/arm/at91_ether.h
index 9885735c9c8a..d1e72e02be3a 100644
--- a/drivers/net/arm/at91_ether.h
+++ b/drivers/net/arm/at91_ether.h
@@ -80,6 +80,7 @@ struct at91_private
struct net_device_stats stats;
struct mii_if_info mii; /* ethtool support */
struct at91_eth_data board_data; /* board-specific configuration */
+ struct clk *ether_clk; /* clock */
/* PHY */
unsigned long phy_type; /* type of PHY (PHY_ID) */
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index 36475eb2727f..312955d07b28 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -700,8 +700,7 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
}
if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
goto out;
}
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index f1d5b1027ff7..081074180e62 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -518,8 +518,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
length = (length + 1) & ~1;
if (length != skb->len) {
- skb = skb_padto(skb, length);
- if (skb == NULL)
+ if (skb_padto(skb, length))
goto out;
}
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 442b2cbeb58a..91783a8008be 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -804,8 +804,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
++len;
if (len > skb->len) {
- skb = skb_padto(skb, len);
- if (skb == NULL)
+ if (skb_padto(skb, len))
return 0;
}
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index d8233e0b7899..a7e4ba5a580f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -29,8 +29,8 @@
#define DRV_MODULE_NAME "b44"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.00"
-#define DRV_MODULE_RELDATE "Apr 7, 2006"
+#define DRV_MODULE_VERSION "1.01"
+#define DRV_MODULE_RELDATE "Jun 16, 2006"
#define B44_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
@@ -75,6 +75,15 @@
/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
+/* b44 internal pattern match filter info */
+#define B44_PATTERN_BASE 0x400
+#define B44_PATTERN_SIZE 0x80
+#define B44_PMASK_BASE 0x600
+#define B44_PMASK_SIZE 0x10
+#define B44_MAX_PATTERNS 16
+#define B44_ETHIPV6UDP_HLEN 62
+#define B44_ETHIPV4UDP_HLEN 42
+
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -101,7 +110,7 @@ MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
-static void b44_init_hw(struct b44 *);
+static void b44_init_hw(struct b44 *, int);
static int dma_desc_align_mask;
static int dma_desc_sync_size;
@@ -873,7 +882,7 @@ static int b44_poll(struct net_device *netdev, int *budget)
spin_lock_irq(&bp->lock);
b44_halt(bp);
b44_init_rings(bp);
- b44_init_hw(bp);
+ b44_init_hw(bp, 1);
netif_wake_queue(bp->dev);
spin_unlock_irq(&bp->lock);
done = 1;
@@ -942,7 +951,7 @@ static void b44_tx_timeout(struct net_device *dev)
b44_halt(bp);
b44_init_rings(bp);
- b44_init_hw(bp);
+ b44_init_hw(bp, 1);
spin_unlock_irq(&bp->lock);
@@ -1059,7 +1068,7 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
b44_halt(bp);
dev->mtu = new_mtu;
b44_init_rings(bp);
- b44_init_hw(bp);
+ b44_init_hw(bp, 1);
spin_unlock_irq(&bp->lock);
b44_enable_ints(bp);
@@ -1356,13 +1365,15 @@ static int b44_set_mac_addr(struct net_device *dev, void *p)
* packet processing. Invoked with bp->lock held.
*/
static void __b44_set_rx_mode(struct net_device *);
-static void b44_init_hw(struct b44 *bp)
+static void b44_init_hw(struct b44 *bp, int full_reset)
{
u32 val;
b44_chip_reset(bp);
- b44_phy_reset(bp);
- b44_setup_phy(bp);
+ if (full_reset) {
+ b44_phy_reset(bp);
+ b44_setup_phy(bp);
+ }
/* Enable CRC32, set proper LED modes and power on PHY */
bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
@@ -1376,16 +1387,21 @@ static void b44_init_hw(struct b44 *bp)
bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
- bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
- bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
- bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
- (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
- bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
+ if (full_reset) {
+ bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
+ bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
+ bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+ (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+ bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
- bw32(bp, B44_DMARX_PTR, bp->rx_pending);
- bp->rx_prod = bp->rx_pending;
+ bw32(bp, B44_DMARX_PTR, bp->rx_pending);
+ bp->rx_prod = bp->rx_pending;
- bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+ bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+ } else {
+ bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+ (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+ }
val = br32(bp, B44_ENET_CTRL);
bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
@@ -1401,7 +1417,7 @@ static int b44_open(struct net_device *dev)
goto out;
b44_init_rings(bp);
- b44_init_hw(bp);
+ b44_init_hw(bp, 1);
b44_check_phy(bp);
@@ -1450,6 +1466,140 @@ static void b44_poll_controller(struct net_device *dev)
}
#endif
+static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
+{
+ u32 i;
+ u32 *pattern = (u32 *) pp;
+
+ for (i = 0; i < bytes; i += sizeof(u32)) {
+ bw32(bp, B44_FILT_ADDR, table_offset + i);
+ bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
+ }
+}
+
+static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+{
+ int magicsync = 6;
+ int k, j, len = offset;
+ int ethaddr_bytes = ETH_ALEN;
+
+ memset(ppattern + offset, 0xff, magicsync);
+ for (j = 0; j < magicsync; j++)
+ set_bit(len++, (unsigned long *) pmask);
+
+ for (j = 0; j < B44_MAX_PATTERNS; j++) {
+ if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
+ ethaddr_bytes = ETH_ALEN;
+ else
+ ethaddr_bytes = B44_PATTERN_SIZE - len;
+ if (ethaddr_bytes <= 0)
+ break;
+ for (k = 0; k < ethaddr_bytes; k++) {
+ ppattern[offset + magicsync +
+ (j * ETH_ALEN) + k] = macaddr[k];
+ len++;
+ set_bit(len, (unsigned long *) pmask);
+ }
+ }
+ return len - 1;
+}
+
+/* Setup magic packet patterns in the b44 WOL
+ * pattern matching filter.
+ */
+static void b44_setup_pseudo_magicp(struct b44 *bp)
+{
+
+ u32 val;
+ int plen0, plen1, plen2;
+ u8 *pwol_pattern;
+ u8 pwol_mask[B44_PMASK_SIZE];
+
+ pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
+ if (!pwol_pattern) {
+ printk(KERN_ERR PFX "Memory not available for WOL\n");
+ return;
+ }
+
+ /* IPv4 magic packet pattern - pattern 0. */
+ memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+ memset(pwol_mask, 0, B44_PMASK_SIZE);
+ plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+ B44_ETHIPV4UDP_HLEN);
+
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
+
+ /* Raw ethernet II magic packet pattern - pattern 1 */
+ memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+ memset(pwol_mask, 0, B44_PMASK_SIZE);
+ plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+ ETH_HLEN);
+
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+ B44_PATTERN_BASE + B44_PATTERN_SIZE);
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+ B44_PMASK_BASE + B44_PMASK_SIZE);
+
+ /* IPv6 magic packet pattern - pattern 2 */
+ memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+ memset(pwol_mask, 0, B44_PMASK_SIZE);
+ plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+ B44_ETHIPV6UDP_HLEN);
+
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+ B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+ B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
+
+ kfree(pwol_pattern);
+
+ /* set these patterns' lengths: one less than each real length */
+ val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
+ bw32(bp, B44_WKUP_LEN, val);
+
+ /* enable wakeup pattern matching */
+ val = br32(bp, B44_DEVCTRL);
+ bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
+
+}
+
+static void b44_setup_wol(struct b44 *bp)
+{
+ u32 val;
+ u16 pmval;
+
+ bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
+
+ if (bp->flags & B44_FLAG_B0_ANDLATER) {
+
+ bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
+
+ val = bp->dev->dev_addr[2] << 24 |
+ bp->dev->dev_addr[3] << 16 |
+ bp->dev->dev_addr[4] << 8 |
+ bp->dev->dev_addr[5];
+ bw32(bp, B44_ADDR_LO, val);
+
+ val = bp->dev->dev_addr[0] << 8 |
+ bp->dev->dev_addr[1];
+ bw32(bp, B44_ADDR_HI, val);
+
+ val = br32(bp, B44_DEVCTRL);
+ bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
+
+ } else {
+ b44_setup_pseudo_magicp(bp);
+ }
+
+ val = br32(bp, B44_SBTMSLOW);
+ bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
+
+ pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
+ pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
+
+}
+
static int b44_close(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
@@ -1475,6 +1625,11 @@ static int b44_close(struct net_device *dev)
netif_poll_enable(dev);
+ if (bp->flags & B44_FLAG_WOL_ENABLE) {
+ b44_init_hw(bp, 0);
+ b44_setup_wol(bp);
+ }
+
b44_free_consistent(bp);
return 0;
@@ -1620,8 +1775,6 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
- if (!netif_running(dev))
- return -EAGAIN;
cmd->supported = (SUPPORTED_Autoneg);
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
@@ -1649,6 +1802,12 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ cmd->advertising |= ADVERTISED_Autoneg;
+ if (!netif_running(dev)){
+ cmd->speed = 0;
+ cmd->duplex = 0xff;
+ }
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
@@ -1658,9 +1817,6 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
- if (!netif_running(dev))
- return -EAGAIN;
-
/* We do not support gigabit. */
if (cmd->autoneg == AUTONEG_ENABLE) {
if (cmd->advertising &
@@ -1677,28 +1833,39 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_lock_irq(&bp->lock);
if (cmd->autoneg == AUTONEG_ENABLE) {
- bp->flags &= ~B44_FLAG_FORCE_LINK;
- bp->flags &= ~(B44_FLAG_ADV_10HALF |
+ bp->flags &= ~(B44_FLAG_FORCE_LINK |
+ B44_FLAG_100_BASE_T |
+ B44_FLAG_FULL_DUPLEX |
+ B44_FLAG_ADV_10HALF |
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
- if (cmd->advertising & ADVERTISE_10HALF)
- bp->flags |= B44_FLAG_ADV_10HALF;
- if (cmd->advertising & ADVERTISE_10FULL)
- bp->flags |= B44_FLAG_ADV_10FULL;
- if (cmd->advertising & ADVERTISE_100HALF)
- bp->flags |= B44_FLAG_ADV_100HALF;
- if (cmd->advertising & ADVERTISE_100FULL)
- bp->flags |= B44_FLAG_ADV_100FULL;
+ if (cmd->advertising == 0) {
+ bp->flags |= (B44_FLAG_ADV_10HALF |
+ B44_FLAG_ADV_10FULL |
+ B44_FLAG_ADV_100HALF |
+ B44_FLAG_ADV_100FULL);
+ } else {
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ bp->flags |= B44_FLAG_ADV_10HALF;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ bp->flags |= B44_FLAG_ADV_10FULL;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ bp->flags |= B44_FLAG_ADV_100HALF;
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ bp->flags |= B44_FLAG_ADV_100FULL;
+ }
} else {
bp->flags |= B44_FLAG_FORCE_LINK;
+ bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
if (cmd->speed == SPEED_100)
bp->flags |= B44_FLAG_100_BASE_T;
if (cmd->duplex == DUPLEX_FULL)
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
- b44_setup_phy(bp);
+ if (netif_running(dev))
+ b44_setup_phy(bp);
spin_unlock_irq(&bp->lock);
@@ -1734,7 +1901,7 @@ static int b44_set_ringparam(struct net_device *dev,
b44_halt(bp);
b44_init_rings(bp);
- b44_init_hw(bp);
+ b44_init_hw(bp, 1);
netif_wake_queue(bp->dev);
spin_unlock_irq(&bp->lock);
@@ -1777,7 +1944,7 @@ static int b44_set_pauseparam(struct net_device *dev,
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
- b44_init_hw(bp);
+ b44_init_hw(bp, 1);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
}
@@ -1819,12 +1986,40 @@ static void b44_get_ethtool_stats(struct net_device *dev,
spin_unlock_irq(&bp->lock);
}
+static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ wol->supported = WAKE_MAGIC;
+ if (bp->flags & B44_FLAG_WOL_ENABLE)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ spin_lock_irq(&bp->lock);
+ if (wol->wolopts & WAKE_MAGIC)
+ bp->flags |= B44_FLAG_WOL_ENABLE;
+ else
+ bp->flags &= ~B44_FLAG_WOL_ENABLE;
+ spin_unlock_irq(&bp->lock);
+
+ return 0;
+}
+
static struct ethtool_ops b44_ethtool_ops = {
.get_drvinfo = b44_get_drvinfo,
.get_settings = b44_get_settings,
.set_settings = b44_set_settings,
.nway_reset = b44_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_wol = b44_get_wol,
+ .set_wol = b44_set_wol,
.get_ringparam = b44_get_ringparam,
.set_ringparam = b44_set_ringparam,
.get_pauseparam = b44_get_pauseparam,
@@ -1903,6 +2098,10 @@ static int __devinit b44_get_invariants(struct b44 *bp)
/* XXX - really required?
bp->flags |= B44_FLAG_BUGGY_TXPTR;
*/
+
+ if (ssb_get_core_rev(bp) >= 7)
+ bp->flags |= B44_FLAG_B0_ANDLATER;
+
out:
return err;
}
@@ -2103,6 +2302,10 @@ static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
spin_unlock_irq(&bp->lock);
free_irq(dev->irq, dev);
+ if (bp->flags & B44_FLAG_WOL_ENABLE) {
+ b44_init_hw(bp, 0);
+ b44_setup_wol(bp);
+ }
pci_disable_device(pdev);
return 0;
}
@@ -2125,7 +2328,7 @@ static int b44_resume(struct pci_dev *pdev)
spin_lock_irq(&bp->lock);
b44_init_rings(bp);
- b44_init_hw(bp);
+ b44_init_hw(bp, 1);
netif_device_attach(bp->dev);
spin_unlock_irq(&bp->lock);
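The Wake-on-LAN code added to b44.c above centers on b44_magic_pattern(), which teaches the pattern-match filter to recognize a standard magic packet: six 0xff synchronization bytes followed by the station MAC address repeated sixteen times, placed at a given header offset. A standalone illustration of that payload layout, with a made-up MAC address:

/* Illustrative only: build the payload a WoL magic packet carries. */
#include <stdio.h>
#include <string.h>

#define MAC_LEN 6

int main(void)
{
	unsigned char mac[MAC_LEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	unsigned char payload[MAC_LEN + 16 * MAC_LEN];
	int i;

	memset(payload, 0xff, MAC_LEN);			/* sync stream */
	for (i = 0; i < 16; i++)			/* 16 repetitions of the MAC */
		memcpy(payload + MAC_LEN + i * MAC_LEN, mac, MAC_LEN);

	printf("magic packet payload: %zu bytes\n", sizeof(payload));
	return 0;
}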
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index b178662978f3..4944507fad23 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -24,6 +24,9 @@
#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */
#define WKUP_LEN_P3_SHIFT 24
#define WKUP_LEN_D3 0x80000000
+#define WKUP_LEN_DISABLE 0x80808080
+#define WKUP_LEN_ENABLE_TWO 0x80800000
+#define WKUP_LEN_ENABLE_THREE 0x80000000
#define B44_ISTAT 0x0020UL /* Interrupt Status */
#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */
#define ISTAT_PME 0x00000040 /* Power Management Event */
@@ -264,6 +267,8 @@
#define SBIDHIGH_VC_SHIFT 16
/* SSB PCI config space registers. */
+#define SSB_PMCSR 0x44
+#define SSB_PE 0x100
#define SSB_BAR0_WIN 0x80
#define SSB_BAR1_WIN 0x84
#define SSB_SPROM_CONTROL 0x88
@@ -420,6 +425,7 @@ struct b44 {
u32 dma_offset;
u32 flags;
+#define B44_FLAG_B0_ANDLATER 0x00000001
#define B44_FLAG_BUGGY_TXPTR 0x00000002
#define B44_FLAG_REORDER_BUG 0x00000004
#define B44_FLAG_PAUSE_AUTO 0x00008000
@@ -435,6 +441,7 @@ struct b44 {
#define B44_FLAG_INTERNAL_PHY 0x10000000
#define B44_FLAG_RX_RING_HACK 0x20000000
#define B44_FLAG_TX_RING_HACK 0x40000000
+#define B44_FLAG_WOL_ENABLE 0x80000000
u32 rx_offset;
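Judging by the masks already in b44.h (WKUP_LEN_P3_MASK, WKUP_LEN_D3) and the new WKUP_LEN_DISABLE value of 0x80808080, the WKUP_LEN register appears to hold one byte per pattern slot: bits 6:0 carry the pattern length minus one and bit 7 disables the slot. That reading matches how b44_setup_pseudo_magicp() packs its three lengths while leaving the fourth slot off. A small illustration with example lengths:

/* Illustrative only: packing three pattern lengths the way b44.c does with
 * val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE. */
#include <stdio.h>

#define WKUP_LEN_ENABLE_THREE 0x80000000u	/* bit 31: fourth slot disabled */

int main(void)
{
	unsigned int plen0 = 0x4d, plen1 = 0x11, plen2 = 0x3d;	/* example lengths - 1 */
	unsigned int wkup_len = plen0 | (plen1 << 8) | (plen2 << 16)
				| WKUP_LEN_ENABLE_THREE;

	printf("WKUP_LEN = 0x%08x\n", wkup_len);
	return 0;
}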
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 702d546567ad..7635736cc791 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1640,7 +1640,7 @@ bnx2_tx_int(struct bnx2 *bp)
skb = tx_buf->skb;
#ifdef BCM_TSO
/* partial BD completions possible with TSO packets */
- if (skb_shinfo(skb)->tso_size) {
+ if (skb_shinfo(skb)->gso_size) {
u16 last_idx, last_ring_idx;
last_idx = sw_cons +
@@ -4428,7 +4428,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
}
#ifdef BCM_TSO
- if ((mss = skb_shinfo(skb)->tso_size) &&
+ if ((mss = skb_shinfo(skb)->gso_size) &&
(skb->len > (bp->dev->mtu + ETH_HLEN))) {
u32 tcp_opt_len, ip_tcp_len;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 39f36aa05aa8..565a54f1d06a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2915,8 +2915,7 @@ static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static int ring;
- skb = skb_padto(skb, cp->min_frame_size);
- if (!skb)
+ if (skb_padto(skb, cp->min_frame_size))
return 0;
/* XXX: we need some higher-level QoS hooks to steer packets to
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 4391bf4bf573..53efff6da784 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1418,7 +1418,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt *cpl;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->tso_size) {
+ if (skb_shinfo(skb)->gso_size) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
@@ -1433,7 +1433,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr->ip_hdr_words = skb->nh.iph->ihl;
hdr->tcp_hdr_words = skb->h.th->doff;
hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
- skb_shinfo(skb)->tso_size));
+ skb_shinfo(skb)->gso_size));
hdr->len = htonl(skb->len - sizeof(*hdr));
cpl = (struct cpl_tx_pkt *)hdr;
sge->stats.tx_lso_pkts++;
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index f130bdab3fd3..d3d958e7ac56 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -885,8 +885,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skblen;
if (len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
len = ETH_ZLEN;
}
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 0941d40f046f..e946c43d3b10 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -938,11 +938,8 @@ static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len < 1)
goto out;
- if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
- goto out;
- }
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
netif_stop_queue(dev);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a373ccb308d8..32b7d444b374 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2394,7 +2394,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
int err;
- if (skb_shinfo(skb)->tso_size) {
+ if (skb_shinfo(skb)->gso_size) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -2402,7 +2402,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
}
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
- mss = skb_shinfo(skb)->tso_size;
+ mss = skb_shinfo(skb)->gso_size;
if (skb->protocol == htons(ETH_P_IP)) {
skb->nh.iph->tot_len = 0;
skb->nh.iph->check = 0;
@@ -2519,7 +2519,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
* tso gets written back prematurely before the data is fully
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->tso_size) {
+ !skb_shinfo(skb)->gso_size) {
tx_ring->last_tx_tso = 0;
size -= 4;
}
@@ -2757,7 +2757,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
#ifdef NETIF_F_TSO
- mss = skb_shinfo(skb)->tso_size;
+ mss = skb_shinfo(skb)->gso_size;
/* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
@@ -2807,7 +2807,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
/* Controller Erratum workaround */
if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->tso_size)
+ !skb_shinfo(skb)->gso_size)
count++;
#endif
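The tso_size to gso_size renames running through 8139cp, bnx2, chelsio and e1000 above (and forcedeth, ixgb, loopback and myri10ge below) follow the skb_shared_info field's new name under generic segmentation offload; for these TSO paths its meaning is unchanged: the number of payload bytes the hardware should place in each emitted segment. A standalone illustration of what the value controls, with example numbers:

/* Illustrative only: gso_size is the per-segment payload size, so it
 * determines how many wire frames one large TSO skb becomes. */
#include <stdio.h>

int main(void)
{
	unsigned int payload = 64240;	/* TCP payload carried by one skb */
	unsigned int gso_size = 1460;	/* skb_shinfo(skb)->gso_size (MSS) */
	unsigned int segments = (payload + gso_size - 1) / gso_size;

	printf("%u bytes at %u per segment -> %u frames\n",
	       payload, gso_size, segments);
	return 0;
}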
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index a806dfe54d23..e70f172699db 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1154,8 +1154,7 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 82bd356e4f3a..a74b20715755 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -677,8 +677,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev)
#endif
if (buf->len < ETH_ZLEN) {
- buf = skb_padto(buf, ETH_ZLEN);
- if (buf == NULL)
+ if (skb_padto(buf, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 8d680ce600d7..724d7dc35fa3 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1027,11 +1027,8 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 ctrl_word;
unsigned long flags;
- if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
- return 0;
- }
+ if (skb_padto(skb, ETH_ZLEN))
+ return 0;
/* Caution: the write order is important here, set the field with the
"ownership" bit last. */
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index b67545be2caa..4bf76f86d8e9 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1064,8 +1064,7 @@ static int eth16i_tx(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 62b38a4494b8..21be4fa071b5 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1495,8 +1495,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[nr] = skb;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->tso_size)
- tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+ if (skb_shinfo(skb)->gso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
@@ -2076,7 +2076,7 @@ static void nv_set_multicast(struct net_device *dev)
spin_unlock_irq(&np->lock);
}
-void nv_update_pause(struct net_device *dev, u32 pause_flags)
+static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 247c8ca86033..dd1dc32dc98d 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1487,11 +1487,8 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
if (skb->len <= 0)
return 0;
- if (skb->len < ETH_ZLEN && lp->chip == HP100_CHIPID_SHASTA) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
- return 0;
- }
+ if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
+ return 0;
/* Get Tx ring tail pointer */
if (lp->txrtail->next == lp->txrhead) {
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 666346f6469e..4c2e7279ba34 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -61,7 +61,7 @@
#undef DEBUG
#define ibmveth_printk(fmt, args...) \
- printk(KERN_INFO "%s: " fmt, __FILE__, ## args)
+ printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
#define ibmveth_error_printk(fmt, args...) \
printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index ae71ed57c12d..e76e6e7be0b1 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -145,7 +145,7 @@ static inline struct sk_buff * ioc3_alloc_skb(unsigned long length,
static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
{
#ifdef CONFIG_SGI_IP27
- vdev <<= 58; /* Shift to PCI64_ATTR_VIRTUAL */
+ vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */
return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
((unsigned long)ptr & TO_PHYS_MASK);
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index f0f04be989d6..93394d76587a 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -69,6 +69,7 @@
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
+#include <linux/if_ether.h>
#include <asm/abs_addr.h>
#include <asm/iseries/mf.h>
@@ -1035,11 +1036,22 @@ static struct ethtool_ops ops = {
.get_link = veth_get_link,
};
-static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
+static struct net_device * __init veth_probe_one(int vlan,
+ struct vio_dev *vio_dev)
{
struct net_device *dev;
struct veth_port *port;
+ struct device *vdev = &vio_dev->dev;
int i, rc;
+ const unsigned char *mac_addr;
+
+ mac_addr = vio_get_attribute(vio_dev, "local-mac-address", NULL);
+ if (mac_addr == NULL)
+ mac_addr = vio_get_attribute(vio_dev, "mac-address", NULL);
+ if (mac_addr == NULL) {
+ veth_error("Unable to fetch MAC address from device tree.\n");
+ return NULL;
+ }
dev = alloc_etherdev(sizeof (struct veth_port));
if (! dev) {
@@ -1064,16 +1076,11 @@ static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
}
port->dev = vdev;
- dev->dev_addr[0] = 0x02;
- dev->dev_addr[1] = 0x01;
- dev->dev_addr[2] = 0xff;
- dev->dev_addr[3] = vlan;
- dev->dev_addr[4] = 0xff;
- dev->dev_addr[5] = this_lp;
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
dev->mtu = VETH_MAX_MTU;
- memcpy(&port->mac_addr, dev->dev_addr, 6);
+ memcpy(&port->mac_addr, mac_addr, ETH_ALEN);
dev->open = veth_open;
dev->hard_start_xmit = veth_start_xmit;
@@ -1608,7 +1615,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct net_device *dev;
struct veth_port *port;
- dev = veth_probe_one(i, &vdev->dev);
+ dev = veth_probe_one(i, vdev);
if (dev == NULL) {
veth_remove(vdev);
return 1;
@@ -1641,7 +1648,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
* support.
*/
static struct vio_device_id veth_device_table[] __devinitdata = {
- { "vlan", "" },
+ { "network", "IBM,iSeries-l-lan" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, veth_device_table);
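The iseries_veth change above stops synthesizing a locally administered MAC address and reads one from the firmware device tree instead, preferring the "local-mac-address" property and falling back to "mac-address". A hedged sketch of that lookup in isolation; the function name is invented and the error value is only an example:

/* Sketch of the device-tree MAC lookup added above; assumes the vio
 * accessor behaves as used in the hunk (NULL when a property is absent). */
#include <linux/etherdevice.h>
#include <asm/vio.h>

static int example_set_mac(struct vio_dev *vdev, struct net_device *dev)
{
	const unsigned char *mac;

	mac = vio_get_attribute(vdev, "local-mac-address", NULL);
	if (!mac)
		mac = vio_get_attribute(vdev, "mac-address", NULL);
	if (!mac)
		return -ENODEV;		/* no usable address in the device tree */

	memcpy(dev->dev_addr, mac, ETH_ALEN);
	return 0;
}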
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 57006fb8840e..8bb32f946993 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint16_t ipcse, tucse, mss;
int err;
- if(likely(skb_shinfo(skb)->tso_size)) {
+ if(likely(skb_shinfo(skb)->gso_size)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -1181,7 +1181,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
}
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
- mss = skb_shinfo(skb)->tso_size;
+ mss = skb_shinfo(skb)->gso_size;
skb->nh.iph->tot_len = 0;
skb->nh.iph->check = 0;
skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index bb5ad479210b..c1c3452c90ca 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -968,8 +968,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* The old LANCE chips don't automatically pad buffers to min. size. */
if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
goto out;
lp->tx_ring[entry].length = -ETH_ZLEN;
}
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 957888de3d7e..1ab09447baa5 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -1083,8 +1083,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, skb->data));
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b79d6e8d3045..43fef7de8cb9 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -74,7 +74,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
struct iphdr *iph = skb->nh.iph;
struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
unsigned int doffset = (iph->ihl + th->doff) * 4;
- unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
+ unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
unsigned int offset = 0;
u32 seq = ntohl(th->seq);
u16 id = ntohs(iph->id);
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
#ifdef LOOPBACK_TSO
- if (skb_shinfo(skb)->tso_size) {
+ if (skb_shinfo(skb)->gso_size) {
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 94d5ea1ce8bd..bf3f343ae715 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -877,8 +877,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
length = skb->len;
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 1a2b9785e998..dbdf189436fa 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1879,7 +1879,7 @@ again:
#ifdef NETIF_F_TSO
if (skb->len > (dev->mtu + ETH_HLEN)) {
- mss = skb_shinfo(skb)->tso_size;
+ mss = skb_shinfo(skb)->gso_size;
if (mss != 0)
max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
}
@@ -1939,8 +1939,7 @@ again:
/* pad frames to at least ETH_ZLEN bytes */
if (unlikely(skb->len < ETH_ZLEN)) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL) {
+ if (skb_padto(skb, ETH_ZLEN)) {
/* The packet is gone, so we must
* return 0 */
mgp->stats.tx_dropped += 1;
@@ -2113,7 +2112,7 @@ abort_linearize:
}
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
- if (skb_shinfo(skb)->tso_size) {
+ if (skb_shinfo(skb)->gso_size) {
printk(KERN_ERR
"myri10ge: %s: TSO but wanted to linearize?!?!?\n",
mgp->dev->name);
@@ -2251,12 +2250,6 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
}
cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
- /* nvidia ext cap is not always linked in ext cap chain */
- if (!cap
- && bridge->vendor == PCI_VENDOR_ID_NVIDIA
- && bridge->device == PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE)
- cap = 0x160;
-
if (!cap)
return;
@@ -2732,8 +2725,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Save configuration space to be restored if the
* nic resets due to a parity error */
myri10ge_save_state(mgp);
- /* Restore state immediately since pci_save_msi_state disables MSI */
- myri10ge_restore_state(mgp);
/* Setup the watchdog timer */
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
new file mode 100644
index 000000000000..b92430c4e3ac
--- /dev/null
+++ b/drivers/net/netx-eth.c
@@ -0,0 +1,516 @@
+/*
+ * drivers/net/netx-eth.c
+ *
+ * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/netx-regs.h>
+#include <asm/arch/pfifo.h>
+#include <asm/arch/xc.h>
+#include <asm/arch/eth.h>
+
+/* XC FIFO offsets */
+#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */
+#define IND_FIFO_PORT_HI(xcno) (1 + ((xcno) << 3)) /* Index of the FIFO where the XC */
+ /* indicates received data packets */
+#define IND_FIFO_PORT_LO(xcno) (2 + ((xcno) << 3)) /* Index of the FIFO where the XC */
+ /* indicates received data packets */
+#define REQ_FIFO_PORT_HI(xcno) (3 + ((xcno) << 3)) /* Index of the FIFO where the ARM */
+ /* indicates data packets to be sent */
+#define REQ_FIFO_PORT_LO(xcno) (4 + ((xcno) << 3)) /* Index of the FIFO where the ARM */
+ /* indicates data packets to be sent */
+#define CON_FIFO_PORT_HI(xcno) (5 + ((xcno) << 3)) /* Index of the FIFO where sent data */
+ /* packets are confirmed */
+#define CON_FIFO_PORT_LO(xcno) (6 + ((xcno) << 3)) /* Index of the FIFO where sent data */
+ /* packets are confirmed */
+#define PFIFO_MASK(xcno) (0x7f << (xcno*8))
+
+#define FIFO_PTR_FRAMELEN_SHIFT 0
+#define FIFO_PTR_FRAMELEN_MASK (0x7ff << 0)
+#define FIFO_PTR_FRAMELEN(len) (((len) << 0) & FIFO_PTR_FRAMELEN_MASK)
+#define FIFO_PTR_TIMETRIG (1<<11)
+#define FIFO_PTR_MULTI_REQ
+#define FIFO_PTR_ORIGIN (1<<14)
+#define FIFO_PTR_VLAN (1<<15)
+#define FIFO_PTR_FRAMENO_SHIFT 16
+#define FIFO_PTR_FRAMENO_MASK (0x3f << 16)
+#define FIFO_PTR_FRAMENO(no) (((no) << 16) & FIFO_PTR_FRAMENO_MASK)
+#define FIFO_PTR_SEGMENT_SHIFT 22
+#define FIFO_PTR_SEGMENT_MASK (0xf << 22)
+#define FIFO_PTR_SEGMENT(seg) (((seg) & 0xf) << 22)
+#define FIFO_PTR_ERROR_SHIFT 28
+#define FIFO_PTR_ERROR_MASK (0xf << 28)
+
+#define ISR_LINK_STATUS_CHANGE (1<<4)
+#define ISR_IND_LO (1<<3)
+#define ISR_CON_LO (1<<2)
+#define ISR_IND_HI (1<<1)
+#define ISR_CON_HI (1<<0)
+
+#define ETH_MAC_LOCAL_CONFIG 0x1560
+#define ETH_MAC_4321 0x1564
+#define ETH_MAC_65 0x1568
+
+#define MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT 16
+#define MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK (0xf<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT)
+#define MAC_TRAFFIC_CLASS_ARRANGEMENT(x) (((x)<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT) & MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK)
+#define LOCAL_CONFIG_LINK_STATUS_IRQ_EN (1<<24)
+#define LOCAL_CONFIG_CON_LO_IRQ_EN (1<<23)
+#define LOCAL_CONFIG_CON_HI_IRQ_EN (1<<22)
+#define LOCAL_CONFIG_IND_LO_IRQ_EN (1<<21)
+#define LOCAL_CONFIG_IND_HI_IRQ_EN (1<<20)
+
+#define CARDNAME "netx-eth"
+
+/* LSB must be zero */
+#define INTERNAL_PHY_ADR 0x1c
+
+struct netx_eth_priv {
+ void __iomem *sram_base, *xpec_base, *xmac_base;
+ int id;
+ struct net_device_stats stats;
+ struct mii_if_info mii;
+ u32 msg_enable;
+ struct xc *xc;
+ spinlock_t lock;
+};
+
+static void netx_eth_set_multicast_list(struct net_device *ndev)
+{
+ /* implement me */
+}
+
+static int
+netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ unsigned char *buf = skb->data;
+ unsigned int len = skb->len;
+
+ spin_lock_irq(&priv->lock);
+ memcpy_toio(priv->sram_base + 1560, (void *)buf, len);
+ if (len < 60) {
+ memset_io(priv->sram_base + 1560 + len, 0, 60 - len);
+ len = 60;
+ }
+
+ pfifo_push(REQ_FIFO_PORT_LO(priv->id),
+ FIFO_PTR_SEGMENT(priv->id) |
+ FIFO_PTR_FRAMENO(1) |
+ FIFO_PTR_FRAMELEN(len));
+
+ ndev->trans_start = jiffies;
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+
+ netif_stop_queue(ndev);
+ spin_unlock_irq(&priv->lock);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void netx_eth_receive(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ unsigned int val, frameno, seg, len;
+ unsigned char *data;
+ struct sk_buff *skb;
+
+ val = pfifo_pop(IND_FIFO_PORT_LO(priv->id));
+
+ frameno = (val & FIFO_PTR_FRAMENO_MASK) >> FIFO_PTR_FRAMENO_SHIFT;
+ seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
+ len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;
+
+ skb = dev_alloc_skb(len);
+ if (unlikely(skb == NULL)) {
+ printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
+ ndev->name);
+ priv->stats.rx_dropped++;
+ return;
+ }
+
+ data = skb_put(skb, len);
+
+ memcpy_fromio(data, priv->sram_base + frameno * 1560, len);
+
+ pfifo_push(EMPTY_PTR_FIFO(priv->id),
+ FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno));
+
+ ndev->last_rx = jiffies;
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_rx(skb);
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+}
+
+static irqreturn_t
+netx_eth_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *ndev = dev_id;
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
+ while (status) {
+ int fill_level;
+ writel(status, NETX_PFIFO_XPEC_ISR(priv->id));
+
+ if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
+ printk("%s: unexpected status: 0x%08x\n",
+ __FUNCTION__, status);
+
+ fill_level =
+ readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
+ while (fill_level--)
+ netx_eth_receive(ndev);
+
+ if (status & ISR_CON_LO)
+ netif_wake_queue(ndev);
+
+ if (status & ISR_LINK_STATUS_CHANGE)
+ mii_check_media(&priv->mii, netif_msg_link(priv), 1);
+
+ status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static struct net_device_stats *netx_eth_query_statistics(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ return &priv->stats;
+}
+
+static int netx_eth_open(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+
+ if (request_irq
+ (ndev->irq, &netx_eth_interrupt, SA_SHIRQ, ndev->name, ndev))
+ return -EAGAIN;
+
+ writel(ndev->dev_addr[0] |
+ ndev->dev_addr[1]<<8 |
+ ndev->dev_addr[2]<<16 |
+ ndev->dev_addr[3]<<24,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
+ writel(ndev->dev_addr[4] |
+ ndev->dev_addr[5]<<8,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
+
+ writel(LOCAL_CONFIG_LINK_STATUS_IRQ_EN |
+ LOCAL_CONFIG_CON_LO_IRQ_EN |
+ LOCAL_CONFIG_CON_HI_IRQ_EN |
+ LOCAL_CONFIG_IND_LO_IRQ_EN |
+ LOCAL_CONFIG_IND_HI_IRQ_EN,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS +
+ ETH_MAC_LOCAL_CONFIG);
+
+ mii_check_media(&priv->mii, netif_msg_link(priv), 1);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int netx_eth_close(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ writel(0,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_LOCAL_CONFIG);
+
+ free_irq(ndev->irq, ndev);
+
+ return 0;
+}
+
+static void netx_eth_timeout(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ int i;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", ndev->name);
+
+ spin_lock_irq(&priv->lock);
+
+ xc_reset(priv->xc);
+ xc_start(priv->xc);
+
+ for (i=2; i<=18; i++)
+ pfifo_push(EMPTY_PTR_FIFO(priv->id),
+ FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
+
+ spin_unlock_irq(&priv->lock);
+
+ netif_wake_queue(ndev);
+}
+
+static int
+netx_eth_phy_read(struct net_device *ndev, int phy_id, int reg)
+{
+ unsigned int val;
+
+ val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
+ MIIMU_REGADDR(reg) | MIIMU_PHY_NRES;
+
+ writel(val, NETX_MIIMU);
+ while (readl(NETX_MIIMU) & MIIMU_SNRDY);
+
+ return readl(NETX_MIIMU) >> 16;
+
+}
+
+static void
+netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value)
+{
+ unsigned int val;
+
+ val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
+ MIIMU_REGADDR(reg) | MIIMU_PHY_NRES | MIIMU_OPMODE_WRITE |
+ MIIMU_DATA(value);
+
+ writel(val, NETX_MIIMU);
+ while (readl(NETX_MIIMU) & MIIMU_SNRDY);
+}
+
+static int netx_eth_enable(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ unsigned int mac4321, mac65;
+ int running, i;
+
+ ether_setup(ndev);
+
+ ndev->open = netx_eth_open;
+ ndev->stop = netx_eth_close;
+ ndev->hard_start_xmit = netx_eth_hard_start_xmit;
+ ndev->tx_timeout = netx_eth_timeout;
+ ndev->watchdog_timeo = msecs_to_jiffies(5000);
+ ndev->get_stats = netx_eth_query_statistics;
+ ndev->set_multicast_list = netx_eth_set_multicast_list;
+
+ priv->msg_enable = NETIF_MSG_LINK;
+ priv->mii.phy_id_mask = 0x1f;
+ priv->mii.reg_num_mask = 0x1f;
+ priv->mii.force_media = 0;
+ priv->mii.full_duplex = 0;
+ priv->mii.dev = ndev;
+ priv->mii.mdio_read = netx_eth_phy_read;
+ priv->mii.mdio_write = netx_eth_phy_write;
+ priv->mii.phy_id = INTERNAL_PHY_ADR + priv->id;
+
+ running = xc_running(priv->xc);
+ xc_stop(priv->xc);
+
+ /* if the xc engine is already running, assume the bootloader has
+ * loaded the firmware for us
+ */
+ if (running) {
+ /* get Node Address from hardware */
+ mac4321 = readl(priv->xpec_base +
+ NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
+ mac65 = readl(priv->xpec_base +
+ NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
+
+ ndev->dev_addr[0] = mac4321 & 0xff;
+ ndev->dev_addr[1] = (mac4321 >> 8) & 0xff;
+ ndev->dev_addr[2] = (mac4321 >> 16) & 0xff;
+ ndev->dev_addr[3] = (mac4321 >> 24) & 0xff;
+ ndev->dev_addr[4] = mac65 & 0xff;
+ ndev->dev_addr[5] = (mac65 >> 8) & 0xff;
+ } else {
+ if (xc_request_firmware(priv->xc)) {
+ printk(CARDNAME ": requesting firmware failed\n");
+ return -ENODEV;
+ }
+ }
+
+ xc_reset(priv->xc);
+ xc_start(priv->xc);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ printk("%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", ndev->name);
+
+ for (i=2; i<=18; i++)
+ pfifo_push(EMPTY_PTR_FIFO(priv->id),
+ FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
+
+ return register_netdev(ndev);
+
+}
+
+static int netx_eth_drv_probe(struct platform_device *pdev)
+{
+ struct netx_eth_priv *priv;
+ struct net_device *ndev;
+ struct netxeth_platform_data *pdata;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof (struct netx_eth_priv));
+ if (!ndev) {
+ printk("%s: could not allocate device.\n", CARDNAME);
+ ret = -ENOMEM;
+ goto exit;
+ }
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+
+ pdata = (struct netxeth_platform_data *)pdev->dev.platform_data;
+ priv->xc = request_xc(pdata->xcno, &pdev->dev);
+ if (!priv->xc) {
+ dev_err(&pdev->dev, "unable to request xc engine\n");
+ ret = -ENODEV;
+ goto exit_free_netdev;
+ }
+
+ ndev->irq = priv->xc->irq;
+ priv->id = pdev->id;
+ priv->xpec_base = priv->xc->xpec_base;
+ priv->xmac_base = priv->xc->xmac_base;
+ priv->sram_base = priv->xc->sram_base;
+
+ ret = pfifo_request(PFIFO_MASK(priv->id));
+ if (ret) {
+ printk("unable to request PFIFO\n");
+ goto exit_free_xc;
+ }
+
+ ret = netx_eth_enable(ndev);
+ if (ret)
+ goto exit_free_pfifo;
+
+ return 0;
+exit_free_pfifo:
+ pfifo_free(PFIFO_MASK(priv->id));
+exit_free_xc:
+ free_xc(priv->xc);
+exit_free_netdev:
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+exit:
+ return ret;
+}
+
+static int netx_eth_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+ xc_stop(priv->xc);
+ free_xc(priv->xc);
+ free_netdev(ndev);
+ pfifo_free(PFIFO_MASK(priv->id));
+
+ return 0;
+}
+
+static int netx_eth_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ dev_err(&pdev->dev, "suspend not implemented\n");
+ return 0;
+}
+
+static int netx_eth_drv_resume(struct platform_device *pdev)
+{
+ dev_err(&pdev->dev, "resume not implemented\n");
+ return 0;
+}
+
+static struct platform_driver netx_eth_driver = {
+ .probe = netx_eth_drv_probe,
+ .remove = netx_eth_drv_remove,
+ .suspend = netx_eth_drv_suspend,
+ .resume = netx_eth_drv_resume,
+ .driver = {
+ .name = CARDNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init netx_eth_init(void)
+{
+ unsigned int phy_control, val;
+
+ printk("NetX Ethernet driver\n");
+
+ phy_control = PHY_CONTROL_PHY_ADDRESS(INTERNAL_PHY_ADR>>1) |
+ PHY_CONTROL_PHY1_MODE(PHY_MODE_ALL) |
+ PHY_CONTROL_PHY1_AUTOMDIX |
+ PHY_CONTROL_PHY1_EN |
+ PHY_CONTROL_PHY0_MODE(PHY_MODE_ALL) |
+ PHY_CONTROL_PHY0_AUTOMDIX |
+ PHY_CONTROL_PHY0_EN |
+ PHY_CONTROL_CLK_XLATIN;
+
+ val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
+ writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
+
+ writel(phy_control | PHY_CONTROL_RESET, NETX_SYSTEM_PHY_CONTROL);
+ udelay(100);
+
+ val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
+ writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
+
+ writel(phy_control, NETX_SYSTEM_PHY_CONTROL);
+
+ return platform_driver_register(&netx_eth_driver);
+}
+
+static void __exit netx_eth_cleanup(void)
+{
+ platform_driver_unregister(&netx_eth_driver);
+}
+
+module_init(netx_eth_init);
+module_exit(netx_eth_cleanup);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 09b11761cdfa..ea93b8f18605 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -831,8 +831,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (length < ETH_ZLEN)
{
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 71f45056a70c..9bae77ce1314 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1359,7 +1359,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
kio_addr_t ioaddr = dev->base_addr;
int okay;
unsigned freespace;
- unsigned pktlen = skb? skb->len : 0;
+ unsigned pktlen = skb->len;
DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n",
skb, dev, pktlen);
@@ -1374,8 +1374,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (pktlen < ETH_ZLEN)
{
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
pktlen = ETH_ZLEN;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cda3e53d6917..2ba6d3a40e2e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -44,6 +44,11 @@ config CICADA_PHY
depends on PHYLIB
---help---
Currently supports the cis8204
+config VITESSE_PHY
+ tristate "Drivers for the Vitesse PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports the vsc8244
config SMSC_PHY
tristate "Drivers for SMSC PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index d9614134cc06..a00e61942525 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
+obj-$(CONFIG_VITESSE_PHY) += vitesse.o
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
new file mode 100644
index 000000000000..ffd215d9a9be
--- /dev/null
+++ b/drivers/net/phy/vitesse.c
@@ -0,0 +1,112 @@
+/*
+ * Driver for Vitesse PHYs
+ *
+ * Author: Kriston Carson
+ *
+ * Copyright (c) 2005 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+/* Vitesse Extended Control Register 1 */
+#define MII_VSC8244_EXT_CON1 0x17
+#define MII_VSC8244_EXTCON1_INIT 0x0000
+
+/* Vitesse Interrupt Mask Register */
+#define MII_VSC8244_IMASK 0x19
+#define MII_VSC8244_IMASK_IEN 0x8000
+#define MII_VSC8244_IMASK_SPEED 0x4000
+#define MII_VSC8244_IMASK_LINK 0x2000
+#define MII_VSC8244_IMASK_DUPLEX 0x1000
+#define MII_VSC8244_IMASK_MASK 0xf000
+
+/* Vitesse Interrupt Status Register */
+#define MII_VSC8244_ISTAT 0x1a
+#define MII_VSC8244_ISTAT_STATUS 0x8000
+#define MII_VSC8244_ISTAT_SPEED 0x4000
+#define MII_VSC8244_ISTAT_LINK 0x2000
+#define MII_VSC8244_ISTAT_DUPLEX 0x1000
+
+/* Vitesse Auxiliary Control/Status Register */
+#define MII_VSC8244_AUX_CONSTAT 0x1c
+#define MII_VSC8244_AUXCONSTAT_INIT 0x0004
+#define MII_VSC8244_AUXCONSTAT_DUPLEX 0x0020
+#define MII_VSC8244_AUXCONSTAT_SPEED 0x0018
+#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
+#define MII_VSC8244_AUXCONSTAT_100 0x0008
+
+MODULE_DESCRIPTION("Vitesse PHY driver");
+MODULE_AUTHOR("Kriston Carson");
+MODULE_LICENSE("GPL");
+
+static int vsc824x_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
+ MII_VSC8244_AUXCONSTAT_INIT);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_VSC8244_EXT_CON1,
+ MII_VSC8244_EXTCON1_INIT);
+ return err;
+}
+
+static int vsc824x_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_VSC8244_ISTAT);
+
+ return (err < 0) ? err : 0;
+}
+
+static int vsc824x_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_VSC8244_IMASK,
+ MII_VSC8244_IMASK_MASK);
+ else
+ err = phy_write(phydev, MII_VSC8244_IMASK, 0);
+ return err;
+}
+
+/* Vitesse 824x */
+static struct phy_driver vsc8244_driver = {
+ .phy_id = 0x000fc6c2,
+ .name = "Vitesse VSC8244",
+ .phy_id_mask = 0x000fffc0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc824x_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
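+
+/*
+ * Note (illustrative): the PHY layer considers this driver a match when the
+ * probed device ID and .phy_id agree on every bit set in .phy_id_mask, so
+ * bits outside 0x000fffc0 (e.g. the revision field) are ignored.
+ */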
+
+static int __init vsc8244_init(void)
+{
+ return phy_driver_register(&vsc8244_driver);
+}
+
+static void __exit vsc8244_exit(void)
+{
+ phy_driver_unregister(&vsc8244_driver);
+}
+
+module_init(vsc8244_init);
+module_exit(vsc8244_exit);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 9945cc6b8d90..12d1cb289bb0 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2172,7 +2172,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
if (dev->features & NETIF_F_TSO) {
- u32 mss = skb_shinfo(skb)->tso_size;
+ u32 mss = skb_shinfo(skb)->gso_size;
if (mss)
return LargeSend | ((mss & MSSMask) << MSSShift);
@@ -2222,8 +2222,7 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb->len;
if (unlikely(len < ETH_ZLEN)) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (!skb)
+ if (skb_padto(skb, ETH_ZLEN))
goto err_update_stats;
len = ETH_ZLEN;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cac9fdd2e1d5..3defe5d4f7d3 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2627,6 +2627,50 @@ no_rx:
#endif
/**
+ * s2io_netpoll - Rx interrupt service handler for netpoll support
+ * @dev : pointer to the device structure.
+ * Description:
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void s2io_netpoll(struct net_device *dev)
+{
+ nic_t *nic = dev->priv;
+ mac_info_t *mac_control;
+ struct config_param *config;
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ u64 val64;
+ int i;
+
+ disable_irq(dev->irq);
+
+ atomic_inc(&nic->isr_cnt);
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ val64 = readq(&bar0->rx_traffic_int);
+ writeq(val64, &bar0->rx_traffic_int);
+
+ for (i = 0; i < config->rx_ring_num; i++)
+ rx_intr_handler(&mac_control->rings[i]);
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ if (fill_rx_buffers(nic, i) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
+ DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
+ break;
+ }
+ }
+ atomic_dec(&nic->isr_cnt);
+ enable_irq(dev->irq);
+ return;
+}
+#endif
+
+/**
* rx_intr_handler - Rx interrupt handler
* @nic: device private variable.
* Description:
@@ -3915,8 +3959,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Control_1 = 0;
txdp->Control_2 = 0;
#ifdef NETIF_F_TSO
- mss = skb_shinfo(skb)->tso_size;
- if (mss) {
+ mss = skb_shinfo(skb)->gso_size;
+ if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
txdp->Control_1 |= TXD_TCP_LSO_EN;
txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
}
@@ -3936,10 +3980,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
}
frg_len = skb->len - skb->data_len;
- if (skb_shinfo(skb)->ufo_size) {
+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
int ufo_size;
- ufo_size = skb_shinfo(skb)->ufo_size;
+ ufo_size = skb_shinfo(skb)->gso_size;
ufo_size &= ~7;
txdp->Control_1 |= TXD_UFO_EN;
txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3965,7 +4009,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Host_Control = (unsigned long) skb;
txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
- if (skb_shinfo(skb)->ufo_size)
+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
txdp->Control_1 |= TXD_UFO_EN;
frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -3980,12 +4024,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
(sp->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
- if (skb_shinfo(skb)->ufo_size)
+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
txdp->Control_1 |= TXD_UFO_EN;
}
txdp->Control_1 |= TXD_GATHER_CODE_LAST;
- if (skb_shinfo(skb)->ufo_size)
+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
frg_cnt++; /* as Txd0 was used for inband header */
tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -3999,7 +4043,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
if (mss)
val64 |= TX_FIFO_SPECIAL_FUNC;
#endif
- if (skb_shinfo(skb)->ufo_size)
+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
val64 |= TX_FIFO_SPECIAL_FUNC;
writeq(val64, &tx_fifo->List_Control);
@@ -6967,6 +7011,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->weight = 32;
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = s2io_netpoll;
+#endif
+
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
if (sp->high_dma_flag == TRUE)
dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index bcef03feb2fc..efd0f235020f 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -396,8 +396,7 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev)
unsigned char *buf;
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31dd3f036fa8..df39f3447655 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1156,8 +1156,7 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping;
if (unlikely(skb->len < ETH_ZLEN)) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (!skb) {
+ if (skb_padto(skb, ETH_ZLEN)) {
tp->stats.tx_dropped++;
goto out;
}
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 38a26df4095f..f3efbd177ae7 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -1525,7 +1525,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
** This is to resolve faulty padding by the HW with 0xaa bytes.
*/
if (BytesSend < C_LEN_ETHERNET_MINSIZE) {
- if ((pMessage = skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) == NULL) {
+ if (skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) {
spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
return 0;
}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 536dd1cf7f79..19a4a16055dc 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2310,8 +2310,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
u64 map;
unsigned long flags;
- skb = skb_padto(skb, ETH_ZLEN);
- if (!skb)
+ if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
if (!spin_trylock_irqsave(&skge->tx_lock, flags))
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index fba1e4d4d83d..d3577871be28 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1160,7 +1160,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
count = sizeof(dma_addr_t) / sizeof(u32);
count += skb_shinfo(skb)->nr_frags * count;
- if (skb_shinfo(skb)->tso_size)
+ if (skb_shinfo(skb)->gso_size)
++count;
if (skb->ip_summed == CHECKSUM_HW)
@@ -1232,7 +1232,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
}
/* Check for TCP Segmentation Offload */
- mss = skb_shinfo(skb)->tso_size;
+ mss = skb_shinfo(skb)->gso_size;
if (mss != 0) {
/* just drop the packet if non-linear expansion fails */
if (skb_header_cloned(skb) &&
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 6cf16f322ad5..8b0321f1976c 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -523,8 +523,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de
length = skb->len;
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL) {
+ if (skb_padto(skb, ETH_ZLEN)) {
netif_wake_queue(dev);
return 0;
}
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 90b818a8de6e..cab0dd958492 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -231,8 +231,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
length = skb->len;
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 9b7805be21da..c158eedc7813 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1349,8 +1349,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
if (skb->ip_summed == CHECKSUM_HW) {
- skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
- if (skb == NULL)
+ if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
return NETDEV_TX_OK;
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 61eec46cb111..f13b2a195c70 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -94,11 +94,13 @@
Version LK1.10 (Philippe De Muyter phdm@macqel.be):
- Make 'unblock interface after Tx underrun' work
+ Version LK1.11 (Pedro Alejandro Lopez-Valencia palopezv at gmail.com):
+ - Add support for IC Plus Corporation IP100A chipset
*/
#define DRV_NAME "sundance"
-#define DRV_VERSION "1.01+LK1.10"
-#define DRV_RELDATE "28-Oct-2005"
+#define DRV_VERSION "1.01+LK1.11"
+#define DRV_RELDATE "14-Jun-2006"
/* The user-configurable values.
@@ -287,6 +289,7 @@ static struct pci_device_id sundance_pci_tbl[] = {
{0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
{0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
+ {0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6},
{0,}
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
@@ -305,6 +308,7 @@ static const struct pci_id_info pci_id_tbl[] = {
{"D-Link DFE-530TXS FAST Ethernet Adapter"},
{"D-Link DL10050-based FAST Ethernet Adapter"},
{"Sundance Technology Alta"},
+ {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
{NULL,}, /* 0 terminated list. */
};
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index b2ddd4522a87..e3e380f90f86 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3780,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
#if TG3_TSO_SUPPORT != 0
mss = 0;
if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
- (mss = skb_shinfo(skb)->tso_size) != 0) {
+ (mss = skb_shinfo(skb)->gso_size) != 0) {
int tcp_opt_len, ip_tcp_len;
if (skb_header_cloned(skb) &&
@@ -3905,7 +3905,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
#if TG3_TSO_SUPPORT != 0
mss = 0;
if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
- (mss = skb_shinfo(skb)->tso_size) != 0) {
+ (mss = skb_shinfo(skb)->gso_size) != 0) {
int tcp_opt_len, ip_tcp_len;
if (skb_header_cloned(skb) &&
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 23032a7bc0a9..c3cb8d26cfe3 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -217,7 +217,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device
dev = alloc_trdev(sizeof(struct olympic_private)) ;
if (!dev) {
i = -ENOMEM;
- goto op_free_dev;
+ goto op_release_dev;
}
olympic_priv = dev->priv ;
@@ -282,8 +282,8 @@ op_free_iomap:
if (olympic_priv->olympic_lap)
iounmap(olympic_priv->olympic_lap);
-op_free_dev:
free_netdev(dev);
+op_release_dev:
pci_release_regions(pdev);
op_disable_dev:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a1ed2d983740..6c62d5c88268 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -490,6 +490,9 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
err = -EINVAL;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
/* Set dev type */
if (ifr->ifr_flags & IFF_TUN) {
/* TUN device */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index d9258d42090c..e49e8b520c28 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -340,7 +340,7 @@ enum state_values {
#endif
#if defined(NETIF_F_TSO)
-#define skb_tso_size(x) (skb_shinfo(x)->tso_size)
+#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS 2
#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
#else
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index fdc21037f6dc..c80a4f1d5f7a 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1284,11 +1284,8 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
/* Calculate the next Tx descriptor entry. */
entry = rp->cur_tx % TX_RING_SIZE;
- if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
- return 0;
- }
+ if (skb_padto(skb, ETH_ZLEN))
+ return 0;
rp->tx_skbuff[entry] = skb;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 2eb6b5f9ba0d..09e05fe40c38 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -248,6 +248,7 @@ static void velocity_free_rd_ring(struct velocity_info *vptr);
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
static int velocity_soft_reset(struct velocity_info *vptr);
static void mii_init(struct velocity_info *vptr, u32 mii_status);
+static u32 velocity_get_link(struct net_device *dev);
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
static void velocity_print_link_status(struct velocity_info *vptr);
static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs);
@@ -798,6 +799,9 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
if (ret < 0)
goto err_iounmap;
+ if (velocity_get_link(dev))
+ netif_carrier_off(dev);
+
velocity_print_info(vptr);
pci_set_drvdata(pdev, dev);
@@ -1653,8 +1657,10 @@ static void velocity_error(struct velocity_info *vptr, int status)
if (linked) {
vptr->mii_status &= ~VELOCITY_LINK_FAIL;
+ netif_carrier_on(vptr->dev);
} else {
vptr->mii_status |= VELOCITY_LINK_FAIL;
+ netif_carrier_off(vptr->dev);
}
velocity_print_link_status(vptr);
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 43d854ace233..b60ef02db7b0 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -326,21 +326,21 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
if (request_irq(irq, sca_intr, 0, devname, card)) {
printk(KERN_ERR "c101: could not allocate IRQ\n");
c101_destroy_card(card);
- return(-EBUSY);
+ return -EBUSY;
}
card->irq = irq;
if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
printk(KERN_ERR "c101: could not request RAM window\n");
c101_destroy_card(card);
- return(-EBUSY);
+ return -EBUSY;
}
card->phy_winbase = winbase;
card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
if (!card->win0base) {
printk(KERN_ERR "c101: could not map I/O address\n");
c101_destroy_card(card);
- return -EBUSY;
+ return -EFAULT;
}
card->tx_ring_buffers = TX_RING_BUFFERS;
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
index 46cef8f92133..57f9538b8fb5 100644
--- a/drivers/net/wan/hdlc_generic.c
+++ b/drivers/net/wan/hdlc_generic.c
@@ -259,7 +259,7 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-static void hdlc_setup(struct net_device *dev)
+void hdlc_setup(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -288,26 +288,6 @@ struct net_device *alloc_hdlcdev(void *priv)
return dev;
}
-int register_hdlc_device(struct net_device *dev)
-{
- int result = dev_alloc_name(dev, "hdlc%d");
- if (result < 0)
- return result;
-
- result = register_netdev(dev);
- if (result != 0)
- return -EIO;
-
-#if 0
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev); /* no carrier until DCD goes up */
-#endif
-
- return 0;
-}
-
-
-
void unregister_hdlc_device(struct net_device *dev)
{
rtnl_lock();
@@ -326,8 +306,8 @@ EXPORT_SYMBOL(hdlc_open);
EXPORT_SYMBOL(hdlc_close);
EXPORT_SYMBOL(hdlc_set_carrier);
EXPORT_SYMBOL(hdlc_ioctl);
+EXPORT_SYMBOL(hdlc_setup);
EXPORT_SYMBOL(alloc_hdlcdev);
-EXPORT_SYMBOL(register_hdlc_device);
EXPORT_SYMBOL(unregister_hdlc_device);
static struct packet_type hdlc_packet_type = {
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index cd32751b64eb..b7d88db89a5c 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -387,6 +387,11 @@ static int __init n2_run(unsigned long io, unsigned long irq,
}
card->phy_winbase = winbase;
card->winbase = ioremap(winbase, USE_WINDOWSIZE);
+ if (!card->winbase) {
+ printk(KERN_ERR "n2: ioremap() failed\n");
+ n2_destroy_card(card);
+ return -EFAULT;
+ }
outb(0, io + N2_PCR);
outb(winbase >> 12, io + N2_BAR);
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f485a97844cc..670e8bd2245c 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -354,6 +354,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
card->rambase == NULL) {
printk(KERN_ERR "pci200syn: ioremap() failed\n");
pci200_pci_remove_one(pdev);
+ return -EFAULT;
}
/* Reset PLX */
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 29a756dd979b..437e0e938e38 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -634,7 +634,13 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
/* set up PLX mapping */
plx_phy = pci_resource_start(pdev, 0);
+
card->plx = ioremap_nocache(plx_phy, 0x70);
+ if (!card->plx) {
+ printk(KERN_ERR "wanxl: ioremap() failed\n");
+ wanxl_pci_remove_one(pdev);
+ return -EFAULT;
+ }
#if RESET_WHILE_LOADING
wanxl_reset(card);
@@ -700,6 +706,12 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
}
mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
+ if (!mem) {
+ printk(KERN_ERR "wanxl: ioremap() failed\n");
+ wanxl_pci_remove_one(pdev);
+ return -EFAULT;
+ }
+
for (i = 0; i < sizeof(firmware); i += 4)
writel(htonl(*(u32*)(firmware + i)), mem + PDM_OFFSET + i);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index e66fdb1f3cfd..d8f917c21ea4 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -636,6 +636,17 @@ struct bcm43xx_key {
u8 algorithm;
};
+/* Driver initialization status. */
+enum {
+ BCM43xx_STAT_UNINIT, /* Uninitialized. */
+ BCM43xx_STAT_INITIALIZING, /* init_board() in progress. */
+ BCM43xx_STAT_INITIALIZED, /* Fully operational. */
+ BCM43xx_STAT_SHUTTINGDOWN, /* free_board() in progress. */
+ BCM43xx_STAT_RESTARTING, /* controller_restart() called. */
+};
+#define bcm43xx_status(bcm) atomic_read(&(bcm)->init_status)
+#define bcm43xx_set_status(bcm, stat) atomic_set(&(bcm)->init_status, (stat))
+
struct bcm43xx_private {
struct ieee80211_device *ieee;
struct ieee80211softmac_device *softmac;
@@ -646,18 +657,17 @@ struct bcm43xx_private {
void __iomem *mmio_addr;
- /* Do not use the lock directly. Use the bcm43xx_lock* helper
- * functions, to be MMIO-safe. */
- spinlock_t _lock;
+ /* Locking, see "theory of locking" text below. */
+ spinlock_t irq_lock;
+ struct mutex mutex;
- /* Driver status flags. */
- u32 initialized:1, /* init_board() succeed */
- was_initialized:1, /* for PCI suspend/resume. */
- shutting_down:1, /* free_board() in progress */
+ /* Driver initialization status BCM43xx_STAT_*** */
+ atomic_t init_status;
+
+ u16 was_initialized:1, /* for PCI suspend/resume. */
__using_pio:1, /* Internal, use bcm43xx_using_pio(). */
bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */
reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */
- powersaving:1, /* TRUE if we are in PowerSaving mode. FALSE otherwise. */
short_preamble:1, /* TRUE, if short preamble is enabled. */
firmware_norelease:1; /* Do not release the firmware. Used on suspend. */
@@ -721,7 +731,7 @@ struct bcm43xx_private {
struct tasklet_struct isr_tasklet;
/* Periodic tasks */
- struct timer_list periodic_tasks;
+ struct work_struct periodic_work;
unsigned int periodic_state;
struct work_struct restart_work;
@@ -746,21 +756,55 @@ struct bcm43xx_private {
#endif
};
-/* bcm43xx_(un)lock() protect struct bcm43xx_private.
- * Note that _NO_ MMIO writes are allowed. If you want to
- * write to the device through MMIO in the critical section, use
- * the *_mmio lock functions.
- * MMIO read-access is allowed, though.
- */
-#define bcm43xx_lock(bcm, flags) spin_lock_irqsave(&(bcm)->_lock, flags)
-#define bcm43xx_unlock(bcm, flags) spin_unlock_irqrestore(&(bcm)->_lock, flags)
-/* bcm43xx_(un)lock_mmio() protect struct bcm43xx_private and MMIO.
- * MMIO write-access to the device is allowed.
- * All MMIO writes are flushed on unlock, so it is guaranteed to not
- * interfere with other threads writing MMIO registers.
+
+/* *** THEORY OF LOCKING ***
+ *
+ * We have two different locks in the bcm43xx driver.
+ * => bcm->mutex: General sleeping mutex. Protects struct bcm43xx_private
+ * and the device registers.
+ * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
+ *
+ * We have three types of helper function pairs to utilize these locks.
+ * (Always use the helper functions.)
+ * 1) bcm43xx_{un}lock_noirq():
+ * Takes bcm->mutex. Does _not_ protect against IRQ concurrency,
+ * so it is almost always unsafe if device IRQs are enabled.
+ * Only use it while device IRQs are masked.
+ * Locking may sleep.
+ * You can sleep within the critical section.
+ * 2) bcm43xx_{un}lock_irqonly():
+ * Takes bcm->irq_lock. Does _not_ protect against
+ * bcm43xx_lock_noirq() critical sections.
+ * It only protects against the IRQ handler path and other
+ * irqonly() critical sections.
+ * Locking does not sleep.
+ * You must not sleep within the critical section.
+ * 3) bcm43xx_{un}lock_irqsafe():
+ * This is the cumulative lock; it takes both the mutex and the irq_lock.
+ * Protects against noirq() and irqonly() critical sections (and
+ * the IRQ handler path).
+ * Locking may sleep.
+ * You must not sleep within the critical section.
*/
-#define bcm43xx_lock_mmio(bcm, flags) bcm43xx_lock(bcm, flags)
-#define bcm43xx_unlock_mmio(bcm, flags) do { mmiowb(); bcm43xx_unlock(bcm, flags); } while (0)
+
+/* Lock type 1 */
+#define bcm43xx_lock_noirq(bcm) mutex_lock(&(bcm)->mutex)
+#define bcm43xx_unlock_noirq(bcm) mutex_unlock(&(bcm)->mutex)
+/* Lock type 2 */
+#define bcm43xx_lock_irqonly(bcm, flags) \
+ spin_lock_irqsave(&(bcm)->irq_lock, flags)
+#define bcm43xx_unlock_irqonly(bcm, flags) \
+ spin_unlock_irqrestore(&(bcm)->irq_lock, flags)
+/* Lock type 3 */
+#define bcm43xx_lock_irqsafe(bcm, flags) do { \
+ bcm43xx_lock_noirq(bcm); \
+ bcm43xx_lock_irqonly(bcm, flags); \
+ } while (0)
+#define bcm43xx_unlock_irqsafe(bcm, flags) do { \
+ bcm43xx_unlock_irqonly(bcm, flags); \
+ bcm43xx_unlock_noirq(bcm); \
+ } while (0)
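+
+/*
+ * Usage sketch (illustrative only, following the rules above):
+ *
+ *	unsigned long flags;
+ *
+ *	bcm43xx_lock_irqsafe(bcm, flags);	// mutex + irq_lock
+ *	// ... access bcm43xx_private and device registers,
+ *	// ... but do not sleep in here
+ *	mmiowb();				// flush MMIO writes
+ *	bcm43xx_unlock_irqsafe(bcm, flags);
+ *
+ * The IRQ handler and tasklet use the irqonly() pair, while code running
+ * with device IRQs masked may use the noirq() pair and sleep.
+ */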
+
static inline
struct bcm43xx_private * bcm43xx_priv(struct net_device *dev)
@@ -843,16 +887,6 @@ struct bcm43xx_radioinfo * bcm43xx_current_radio(struct bcm43xx_private *bcm)
return &(bcm->core_80211_ext[bcm->current_80211_core_idx].radio);
}
-/* Are we running in init_board() context? */
-static inline
-int bcm43xx_is_initializing(struct bcm43xx_private *bcm)
-{
- if (bcm->initialized)
- return 0;
- if (bcm->shutting_down)
- return 0;
- return 1;
-}
static inline
struct bcm43xx_lopair * bcm43xx_get_lopair(struct bcm43xx_phyinfo *phy,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
index 7497fb16076e..ce2e40b29b4f 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
@@ -77,8 +77,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
down(&big_buffer_sem);
- bcm43xx_lock_mmio(bcm, flags);
- if (!bcm->initialized) {
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
fappend("Board not initialized.\n");
goto out;
}
@@ -121,7 +121,7 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
fappend("\n");
out:
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
up(&big_buffer_sem);
return res;
@@ -159,8 +159,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
unsigned long flags;
down(&big_buffer_sem);
- bcm43xx_lock_mmio(bcm, flags);
- if (!bcm->initialized) {
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
fappend("Board not initialized.\n");
goto out;
}
@@ -169,7 +169,7 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
fappend("boardflags: 0x%04x\n", bcm->sprom.boardflags);
out:
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
up(&big_buffer_sem);
return res;
@@ -188,8 +188,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
u64 tsf;
down(&big_buffer_sem);
- bcm43xx_lock_mmio(bcm, flags);
- if (!bcm->initialized) {
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
fappend("Board not initialized.\n");
goto out;
}
@@ -199,7 +199,7 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
(unsigned int)(tsf & 0xFFFFFFFFULL));
out:
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
up(&big_buffer_sem);
return res;
@@ -221,8 +221,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
res = -EFAULT;
goto out_up;
}
- bcm43xx_lock_mmio(bcm, flags);
- if (!bcm->initialized) {
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
res = -EFAULT;
goto out_unlock;
@@ -233,10 +233,11 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
goto out_unlock;
}
bcm43xx_tsf_write(bcm, tsf);
+ mmiowb();
res = buf_size;
out_unlock:
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
out_up:
up(&big_buffer_sem);
return res;
@@ -257,7 +258,7 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
int i, cnt, j = 0;
down(&big_buffer_sem);
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
fappend("Last %d logged xmitstatus blobs (Latest first):\n\n",
BCM43xx_NR_LOGGED_XMITSTATUS);
@@ -293,14 +294,14 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
i = BCM43xx_NR_LOGGED_XMITSTATUS - 1;
}
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
if (*ppos == pos) {
/* Done. Drop the copied data. */
e->xmitstatus_printing = 0;
}
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
up(&big_buffer_sem);
return res;
}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index 4b2c02c0b31e..ec80692d638a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
@@ -51,12 +51,12 @@ static void bcm43xx_led_blink(unsigned long d)
struct bcm43xx_private *bcm = led->bcm;
unsigned long flags;
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqonly(bcm, flags);
if (led->blink_interval) {
bcm43xx_led_changestate(led);
mod_timer(&led->blink_timer, jiffies + led->blink_interval);
}
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqonly(bcm, flags);
}
static void bcm43xx_led_blink_start(struct bcm43xx_led *led,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 736dde96c4a3..085d7857fe31 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -498,20 +498,31 @@ static inline u32 bcm43xx_interrupt_disable(struct bcm43xx_private *bcm, u32 mas
return old_mask;
}
+/* Synchronize IRQ top- and bottom-half.
+ * IRQs must be masked before calling this.
+ * This must not be called with the irq_lock held.
+ */
+static void bcm43xx_synchronize_irq(struct bcm43xx_private *bcm)
+{
+ synchronize_irq(bcm->irq);
+ tasklet_disable(&bcm->isr_tasklet);
+}
+
/* Make sure we don't receive more data from the device. */
static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm, u32 *oldstate)
{
- u32 old;
unsigned long flags;
+ u32 old;
- bcm43xx_lock_mmio(bcm, flags);
- if (bcm43xx_is_initializing(bcm) || bcm->shutting_down) {
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_lock_irqonly(bcm, flags);
+ if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) {
+ bcm43xx_unlock_irqonly(bcm, flags);
return -EBUSY;
}
old = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
- tasklet_disable(&bcm->isr_tasklet);
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqonly(bcm, flags);
+ bcm43xx_synchronize_irq(bcm);
+
if (oldstate)
*oldstate = old;
@@ -1389,7 +1400,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
#endif
}
- if (bcm->shutting_down) {
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
& ~(BCM43xx_SBF_MAC_ENABLED | 0x00000002));
@@ -1709,7 +1720,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
# define bcmirq_handled(irq) do { /* nothing */ } while (0)
#endif /* CONFIG_BCM43XX_DEBUG*/
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqonly(bcm, flags);
reason = bcm->irq_reason;
dma_reason[0] = bcm->dma_reason[0];
dma_reason[1] = bcm->dma_reason[1];
@@ -1734,7 +1745,8 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3]);
bcm43xx_controller_restart(bcm, "DMA error");
- bcm43xx_unlock_mmio(bcm, flags);
+ mmiowb();
+ bcm43xx_unlock_irqonly(bcm, flags);
return;
}
if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) |
@@ -1821,7 +1833,8 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
if (!modparam_noleds)
bcm43xx_leds_update(bcm, activity);
bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
- bcm43xx_unlock_mmio(bcm, flags);
+ mmiowb();
+ bcm43xx_unlock_irqonly(bcm, flags);
}
static void pio_irq_workaround(struct bcm43xx_private *bcm,
@@ -1870,7 +1883,7 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
if (!bcm)
return IRQ_NONE;
- spin_lock(&bcm->_lock);
+ spin_lock(&bcm->irq_lock);
reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
if (reason == 0xffffffff) {
@@ -1899,7 +1912,7 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
* completely, but some careful work is needed to fix this. I think it
* is best to stay with this cheap workaround for now... .
*/
- if (likely(bcm->initialized)) {
+ if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) {
/* disable all IRQs. They are enabled again in the bottom half. */
bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
/* save the reason code and call our bottom half. */
@@ -1909,7 +1922,7 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
out:
mmiowb();
- spin_unlock(&bcm->_lock);
+ spin_unlock(&bcm->irq_lock);
return ret;
}
@@ -2133,6 +2146,13 @@ out:
return err;
}
+#ifdef CONFIG_BCM947XX
+static struct pci_device_id bcm43xx_47xx_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) },
+ { 0 }
+};
+#endif
+
static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
{
int res;
@@ -2142,11 +2162,15 @@ static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
bcm->irq = bcm->pci_dev->irq;
#ifdef CONFIG_BCM947XX
if (bcm->pci_dev->bus->number == 0) {
- struct pci_dev *d = NULL;
- /* FIXME: we will probably need more device IDs here... */
- d = pci_find_device(PCI_VENDOR_ID_BROADCOM, 0x4324, NULL);
- if (d != NULL) {
- bcm->irq = d->irq;
+ struct pci_dev *d;
+ struct pci_device_id *id;
+ for (id = bcm43xx_47xx_ids; id->vendor; id++) {
+ d = pci_get_device(id->vendor, id->device, NULL);
+ if (d != NULL) {
+ bcm->irq = d->irq;
+ pci_dev_put(d);
+ break;
+ }
}
}
#endif
@@ -3106,15 +3130,10 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
//TODO for APHY (temperature?)
}
-static void bcm43xx_periodic_task_handler(unsigned long d)
+static void do_periodic_work(struct bcm43xx_private *bcm)
{
- struct bcm43xx_private *bcm = (struct bcm43xx_private *)d;
- unsigned long flags;
unsigned int state;
- bcm43xx_lock_mmio(bcm, flags);
-
- assert(bcm->initialized);
state = bcm->periodic_state;
if (state % 8 == 0)
bcm43xx_periodic_every120sec(bcm);
@@ -3122,29 +3141,93 @@ static void bcm43xx_periodic_task_handler(unsigned long d)
bcm43xx_periodic_every60sec(bcm);
if (state % 2 == 0)
bcm43xx_periodic_every30sec(bcm);
- bcm43xx_periodic_every15sec(bcm);
+ if (state % 1 == 0)
+ bcm43xx_periodic_every15sec(bcm);
bcm->periodic_state = state + 1;
- mod_timer(&bcm->periodic_tasks, jiffies + (HZ * 15));
+ schedule_delayed_work(&bcm->periodic_work, HZ * 15);
+}
+
+/* Estimate a "Badness" value based on the periodic work
+ * state-machine state. "Badness" is worse (bigger) if the
+ * periodic work will take longer.
+ */
+static int estimate_periodic_work_badness(unsigned int state)
+{
+ int badness = 0;
+
+ if (state % 8 == 0) /* every 120 sec */
+ badness += 10;
+ if (state % 4 == 0) /* every 60 sec */
+ badness += 5;
+ if (state % 2 == 0) /* every 30 sec */
+ badness += 1;
+ if (state % 1 == 0) /* every 15 sec */
+ badness += 1;
- bcm43xx_unlock_mmio(bcm, flags);
+#define BADNESS_LIMIT 4
+ return badness;
+}
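+
+/*
+ * Worked example (illustrative): at state 0 every condition matches, so
+ * badness = 10 + 5 + 1 + 1 = 17 > BADNESS_LIMIT and the handler below takes
+ * the heavyweight, preemptible path. At state 1 only the 15-second work
+ * runs, badness = 1, and the cheap irqsafe path is used instead.
+ */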
+
+static void bcm43xx_periodic_work_handler(void *d)
+{
+ struct bcm43xx_private *bcm = d;
+ unsigned long flags;
+ u32 savedirqs = 0;
+ int badness;
+
+ badness = estimate_periodic_work_badness(bcm->periodic_state);
+ if (badness > BADNESS_LIMIT) {
+ /* Periodic work will take a long time, so we want it to
+ * be preemtible.
+ */
+ bcm43xx_lock_irqonly(bcm, flags);
+ netif_stop_queue(bcm->net_dev);
+ if (bcm43xx_using_pio(bcm))
+ bcm43xx_pio_freeze_txqueues(bcm);
+ savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
+ bcm43xx_unlock_irqonly(bcm, flags);
+ bcm43xx_lock_noirq(bcm);
+ bcm43xx_synchronize_irq(bcm);
+ } else {
+ /* Periodic work should take a short time, so we want low
+ * locking overhead.
+ */
+ bcm43xx_lock_irqsafe(bcm, flags);
+ }
+
+ do_periodic_work(bcm);
+
+ if (badness > BADNESS_LIMIT) {
+ bcm43xx_lock_irqonly(bcm, flags);
+ if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) {
+ tasklet_enable(&bcm->isr_tasklet);
+ bcm43xx_interrupt_enable(bcm, savedirqs);
+ if (bcm43xx_using_pio(bcm))
+ bcm43xx_pio_thaw_txqueues(bcm);
+ }
+ netif_wake_queue(bcm->net_dev);
+ mmiowb();
+ bcm43xx_unlock_irqonly(bcm, flags);
+ bcm43xx_unlock_noirq(bcm);
+ } else {
+ mmiowb();
+ bcm43xx_unlock_irqsafe(bcm, flags);
+ }
}
static void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
{
- del_timer_sync(&bcm->periodic_tasks);
+ cancel_rearming_delayed_work(&bcm->periodic_work);
}
static void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
{
- struct timer_list *timer = &(bcm->periodic_tasks);
+ struct work_struct *work = &(bcm->periodic_work);
- assert(bcm->initialized);
- setup_timer(timer,
- bcm43xx_periodic_task_handler,
- (unsigned long)bcm);
- timer->expires = jiffies;
- add_timer(timer);
+ assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
+ INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
+ schedule_work(work);
}
static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@ -3158,16 +3241,12 @@ static void bcm43xx_security_init(struct bcm43xx_private *bcm)
static void bcm43xx_free_board(struct bcm43xx_private *bcm)
{
int i, err;
- unsigned long flags;
+ bcm43xx_lock_noirq(bcm);
bcm43xx_sysfs_unregister(bcm);
-
bcm43xx_periodic_tasks_delete(bcm);
- bcm43xx_lock(bcm, flags);
- bcm->initialized = 0;
- bcm->shutting_down = 1;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN);
for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
if (!bcm->core_80211[i].available)
@@ -3182,23 +3261,19 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm)
bcm43xx_pctl_set_crystal(bcm, 0);
- bcm43xx_lock(bcm, flags);
- bcm->shutting_down = 0;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
+ bcm43xx_unlock_noirq(bcm);
}
static int bcm43xx_init_board(struct bcm43xx_private *bcm)
{
int i, err;
int connect_phy;
- unsigned long flags;
might_sleep();
- bcm43xx_lock(bcm, flags);
- bcm->initialized = 0;
- bcm->shutting_down = 0;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_lock_noirq(bcm);
+ bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
err = bcm43xx_pctl_set_crystal(bcm, 1);
if (err)
@@ -3265,9 +3340,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
}
/* Initialization of the board is done. Flag it as such. */
- bcm43xx_lock(bcm, flags);
- bcm->initialized = 1;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
bcm43xx_periodic_tasks_setup(bcm);
bcm43xx_sysfs_register(bcm);
@@ -3278,6 +3351,8 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
assert(err == 0);
out:
+ bcm43xx_unlock_noirq(bcm);
+
return err;
err_80211_unwind:
@@ -3534,8 +3609,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
struct bcm43xx_radioinfo *radio;
unsigned long flags;
- bcm43xx_lock_mmio(bcm, flags);
- if (bcm->initialized) {
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
bcm43xx_mac_suspend(bcm);
bcm43xx_radio_selectchannel(bcm, channel, 0);
bcm43xx_mac_enable(bcm);
@@ -3543,7 +3618,7 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
radio = bcm43xx_current_radio(bcm);
radio->initial_channel = channel;
}
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
}
/* set_security() callback in struct ieee80211_device */
@@ -3557,7 +3632,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
dprintk(KERN_INFO PFX "set security called");
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
for (keyidx = 0; keyidx<WEP_KEYS; keyidx++)
if (sec->flags & (1<<keyidx)) {
@@ -3587,7 +3662,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
dprintk(", .encrypt = %d", sec->encrypt);
}
dprintk("\n");
- if (bcm->initialized && !bcm->ieee->host_encrypt) {
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
+ !bcm->ieee->host_encrypt) {
if (secinfo->enabled) {
/* upload WEP keys to hardware */
char null_address[6] = { 0 };
@@ -3621,7 +3697,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
} else
bcm43xx_clear_keys(bcm);
}
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
}
/* hard_start_xmit() callback in struct ieee80211_device */
@@ -3633,10 +3709,10 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb,
int err = -ENODEV;
unsigned long flags;
- bcm43xx_lock_mmio(bcm, flags);
- if (likely(bcm->initialized))
+ bcm43xx_lock_irqonly(bcm, flags);
+ if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED))
err = bcm43xx_tx(bcm, txb);
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqonly(bcm, flags);
return err;
}
@@ -3651,9 +3727,9 @@ static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
unsigned long flags;
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqonly(bcm, flags);
bcm43xx_controller_restart(bcm, "TX timeout");
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqonly(bcm, flags);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3678,9 +3754,11 @@ static int bcm43xx_net_open(struct net_device *net_dev)
static int bcm43xx_net_stop(struct net_device *net_dev)
{
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
+ int err;
ieee80211softmac_stop(net_dev);
- bcm43xx_disable_interrupts_sync(bcm, NULL);
+ err = bcm43xx_disable_interrupts_sync(bcm, NULL);
+ assert(!err);
bcm43xx_free_board(bcm);
return 0;
@@ -3692,6 +3770,7 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm,
{
int err;
+ bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
bcm->ieee = netdev_priv(net_dev);
bcm->softmac = ieee80211_priv(net_dev);
bcm->softmac->set_channel = bcm43xx_ieee80211_set_chan;
@@ -3700,7 +3779,8 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm,
bcm->pci_dev = pci_dev;
bcm->net_dev = net_dev;
bcm->bad_frames_preempt = modparam_bad_frames_preempt;
- spin_lock_init(&bcm->_lock);
+ spin_lock_init(&bcm->irq_lock);
+ mutex_init(&bcm->mutex);
tasklet_init(&bcm->isr_tasklet,
(void (*)(unsigned long))bcm43xx_interrupt_tasklet,
(unsigned long)bcm);
@@ -3831,7 +3911,7 @@ static void bcm43xx_chip_reset(void *_bcm)
struct net_device *net_dev = bcm->net_dev;
struct pci_dev *pci_dev = bcm->pci_dev;
int err;
- int was_initialized = bcm->initialized;
+ int was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
netif_stop_queue(bcm->net_dev);
tasklet_disable(&bcm->isr_tasklet);
@@ -3866,6 +3946,7 @@ failure:
*/
void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason)
{
+ bcm43xx_set_status(bcm, BCM43xx_STAT_RESTARTING);
bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
@@ -3884,11 +3965,11 @@ static int bcm43xx_suspend(struct pci_dev *pdev, pm_message_t state)
dprintk(KERN_INFO PFX "Suspending...\n");
- bcm43xx_lock(bcm, flags);
- bcm->was_initialized = bcm->initialized;
- if (bcm->initialized)
+ bcm43xx_lock_irqsafe(bcm, flags);
+ bcm->was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
+ if (bcm->was_initialized)
try_to_shutdown = 1;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
netif_device_detach(net_dev);
if (try_to_shutdown) {
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index b0abac515530..f8200deecc8a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1410,7 +1410,10 @@ static inline
u16 bcm43xx_phy_lo_g_deviation_subval(struct bcm43xx_private *bcm, u16 control)
{
struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
+ u16 ret;
+ unsigned long flags;
+ local_irq_save(flags);
if (phy->connected) {
bcm43xx_phy_write(bcm, 0x15, 0xE300);
control <<= 8;
@@ -1430,8 +1433,10 @@ u16 bcm43xx_phy_lo_g_deviation_subval(struct bcm43xx_private *bcm, u16 control)
bcm43xx_phy_write(bcm, 0x0015, control | 0xFFE0);
udelay(8);
}
+ ret = bcm43xx_phy_read(bcm, 0x002D);
+ local_irq_restore(flags);
- return bcm43xx_phy_read(bcm, 0x002D);
+ return ret;
}
static u32 bcm43xx_phy_lo_g_singledeviation(struct bcm43xx_private *bcm, u16 control)
@@ -1648,7 +1653,7 @@ void bcm43xx_phy_set_baseband_attenuation(struct bcm43xx_private *bcm,
void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
{
static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 };
- const int is_initializing = bcm43xx_is_initializing(bcm);
+ const int is_initializing = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZING);
struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
u16 h, i, oldi = 0, j;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
index 0aa1bd269a25..574085c46152 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
@@ -262,8 +262,10 @@ static void tx_tasklet(unsigned long d)
int err;
u16 txctl;
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqonly(bcm, flags);
+ if (queue->tx_frozen)
+ goto out_unlock;
txctl = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL);
if (txctl & BCM43xx_PIO_TXCTL_SUSPEND)
goto out_unlock;
@@ -298,7 +300,7 @@ static void tx_tasklet(unsigned long d)
continue;
}
out_unlock:
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqonly(bcm, flags);
}
static void setup_txqueues(struct bcm43xx_pioqueue *queue)
@@ -374,7 +376,6 @@ static void cancel_transfers(struct bcm43xx_pioqueue *queue)
struct bcm43xx_pio_txpacket *packet, *tmp_packet;
netif_tx_disable(queue->bcm->net_dev);
- assert(queue->bcm->shutting_down);
tasklet_disable(&queue->txtask);
list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
@@ -634,5 +635,40 @@ void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue)
bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL)
& ~BCM43xx_PIO_TXCTL_SUSPEND);
bcm43xx_power_saving_ctl_bits(queue->bcm, -1, -1);
- tasklet_schedule(&queue->txtask);
+ if (!list_empty(&queue->txqueue))
+ tasklet_schedule(&queue->txtask);
+}
+
+void bcm43xx_pio_freeze_txqueues(struct bcm43xx_private *bcm)
+{
+ struct bcm43xx_pio *pio;
+
+ assert(bcm43xx_using_pio(bcm));
+ pio = bcm43xx_current_pio(bcm);
+ pio->queue0->tx_frozen = 1;
+ pio->queue1->tx_frozen = 1;
+ pio->queue2->tx_frozen = 1;
+ pio->queue3->tx_frozen = 1;
}
+
+void bcm43xx_pio_thaw_txqueues(struct bcm43xx_private *bcm)
+{
+ struct bcm43xx_pio *pio;
+
+ assert(bcm43xx_using_pio(bcm));
+ pio = bcm43xx_current_pio(bcm);
+ pio->queue0->tx_frozen = 0;
+ pio->queue1->tx_frozen = 0;
+ pio->queue2->tx_frozen = 0;
+ pio->queue3->tx_frozen = 0;
+ if (!list_empty(&pio->queue0->txqueue))
+ tasklet_schedule(&pio->queue0->txtask);
+ if (!list_empty(&pio->queue1->txqueue))
+ tasklet_schedule(&pio->queue1->txtask);
+ if (!list_empty(&pio->queue2->txqueue))
+ tasklet_schedule(&pio->queue2->txtask);
+ if (!list_empty(&pio->queue3->txqueue))
+ tasklet_schedule(&pio->queue3->txtask);
+}
+
+
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h b/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
index dfc78209e3a3..bc78a3c2cafb 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
@@ -54,6 +54,7 @@ struct bcm43xx_pioqueue {
u16 mmio_base;
u8 tx_suspended:1,
+ tx_frozen:1,
need_workarounds:1; /* Workarounds needed for core.rev < 3 */
/* Adjusted size of the device internal TX buffer. */
@@ -108,8 +109,12 @@ void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
struct bcm43xx_xmitstatus *status);
void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue);
+/* Suspend a TX queue on hardware level. */
void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue);
void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue);
+/* Suspend (freeze) the TX tasklet (software level). */
+void bcm43xx_pio_freeze_txqueues(struct bcm43xx_private *bcm);
+void bcm43xx_pio_thaw_txqueues(struct bcm43xx_private *bcm);
#else /* CONFIG_BCM43XX_PIO */
@@ -145,6 +150,14 @@ static inline
void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue)
{
}
+static inline
+void bcm43xx_pio_freeze_txqueues(struct bcm43xx_private *bcm)
+{
+}
+static inline
+void bcm43xx_pio_thaw_txqueues(struct bcm43xx_private *bcm)
+{
+}
#endif /* CONFIG_BCM43XX_PIO */
#endif /* BCM43xx_PIO_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
index b438f48e891d..6a23bdc75412 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
@@ -120,12 +120,12 @@ static ssize_t bcm43xx_attr_sprom_show(struct device *dev,
GFP_KERNEL);
if (!sprom)
return -ENOMEM;
- bcm43xx_lock_mmio(bcm, flags);
- assert(bcm->initialized);
+ bcm43xx_lock_irqsafe(bcm, flags);
err = bcm43xx_sprom_read(bcm, sprom);
if (!err)
err = sprom2hex(sprom, buf, PAGE_SIZE);
- bcm43xx_unlock_mmio(bcm, flags);
+ mmiowb();
+ bcm43xx_unlock_irqsafe(bcm, flags);
kfree(sprom);
return err;
@@ -150,10 +150,10 @@ static ssize_t bcm43xx_attr_sprom_store(struct device *dev,
err = hex2sprom(sprom, buf, count);
if (err)
goto out_kfree;
- bcm43xx_lock_mmio(bcm, flags);
- assert(bcm->initialized);
+ bcm43xx_lock_irqsafe(bcm, flags);
err = bcm43xx_sprom_write(bcm, sprom);
- bcm43xx_unlock_mmio(bcm, flags);
+ mmiowb();
+ bcm43xx_unlock_irqsafe(bcm, flags);
out_kfree:
kfree(sprom);
@@ -170,15 +170,13 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
char *buf)
{
struct bcm43xx_private *bcm = dev_to_bcm(dev);
- unsigned long flags;
int err;
ssize_t count = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- bcm43xx_lock(bcm, flags);
- assert(bcm->initialized);
+ bcm43xx_lock_noirq(bcm);
switch (bcm43xx_current_radio(bcm)->interfmode) {
case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -195,7 +193,7 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
}
err = 0;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_noirq(bcm);
return err ? err : count;
@@ -231,16 +229,15 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
return -EINVAL;
}
- bcm43xx_lock_mmio(bcm, flags);
- assert(bcm->initialized);
+ bcm43xx_lock_irqsafe(bcm, flags);
err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
if (err) {
printk(KERN_ERR PFX "Interference Mitigation not "
"supported by device\n");
}
-
- bcm43xx_unlock_mmio(bcm, flags);
+ mmiowb();
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err ? err : count;
}
@@ -254,15 +251,13 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
char *buf)
{
struct bcm43xx_private *bcm = dev_to_bcm(dev);
- unsigned long flags;
int err;
ssize_t count;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- bcm43xx_lock(bcm, flags);
- assert(bcm->initialized);
+ bcm43xx_lock_noirq(bcm);
if (bcm->short_preamble)
count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n");
@@ -270,7 +265,7 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n");
err = 0;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_noirq(bcm);
return err ? err : count;
}
@@ -290,13 +285,12 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
value = get_boolean(buf, count);
if (value < 0)
return value;
- bcm43xx_lock(bcm, flags);
- assert(bcm->initialized);
+ bcm43xx_lock_irqsafe(bcm, flags);
bcm->short_preamble = !!value;
err = 0;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err ? err : count;
}
@@ -310,7 +304,7 @@ int bcm43xx_sysfs_register(struct bcm43xx_private *bcm)
struct device *dev = &bcm->pci_dev->dev;
int err;
- assert(bcm->initialized);
+ assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
err = device_create_file(dev, &dev_attr_sprom);
if (err)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index b45063974ae9..c35cb3a0777e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -55,13 +55,13 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
char *extra)
{
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
- unsigned long flags;
int i;
+ unsigned long flags;
struct bcm43xx_phyinfo *phy;
char suffix[7] = { 0 };
int have_a = 0, have_b = 0, have_g = 0;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
for (i = 0; i < bcm->nr_80211_available; i++) {
phy = &(bcm->core_80211_ext[i].phy);
switch (phy->type) {
@@ -77,7 +77,7 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
assert(0);
}
}
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
i = 0;
if (have_a) {
@@ -111,7 +111,7 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
int freq;
int err = -EINVAL;
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
if ((data->freq.m >= 0) && (data->freq.m <= 1000)) {
channel = data->freq.m;
freq = bcm43xx_channel_to_freq(bcm, channel);
@@ -121,7 +121,7 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
}
if (!bcm43xx_is_valid_channel(bcm, channel))
goto out_unlock;
- if (bcm->initialized) {
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
//ieee80211softmac_disassoc(softmac, $REASON);
bcm43xx_mac_suspend(bcm);
err = bcm43xx_radio_selectchannel(bcm, channel, 0);
@@ -131,7 +131,7 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
err = 0;
}
out_unlock:
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err;
}
@@ -147,11 +147,10 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
int err = -ENODEV;
u16 channel;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
radio = bcm43xx_current_radio(bcm);
channel = radio->channel;
if (channel == 0xFF) {
- assert(!bcm->initialized);
channel = radio->initial_channel;
if (channel == 0xFF)
goto out_unlock;
@@ -163,7 +162,7 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
err = 0;
out_unlock:
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err;
}
@@ -181,13 +180,13 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
if (mode == IW_MODE_AUTO)
mode = BCM43xx_INITIAL_IWMODE;
- bcm43xx_lock_mmio(bcm, flags);
- if (bcm->initialized) {
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
if (bcm->ieee->iw_mode != mode)
bcm43xx_set_iwmode(bcm, mode);
} else
bcm->ieee->iw_mode = mode;
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return 0;
}
@@ -200,9 +199,9 @@ static int bcm43xx_wx_get_mode(struct net_device *net_dev,
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
unsigned long flags;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
data->mode = bcm->ieee->iw_mode;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return 0;
}
@@ -255,7 +254,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
IW_ENC_CAPA_CIPHER_TKIP |
IW_ENC_CAPA_CIPHER_CCMP;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
phy = bcm43xx_current_phy(bcm);
range->num_bitrates = 0;
@@ -302,7 +301,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
}
range->num_frequency = j;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return 0;
}
@@ -313,14 +312,13 @@ static int bcm43xx_wx_set_nick(struct net_device *net_dev,
char *extra)
{
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
- unsigned long flags;
size_t len;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_noirq(bcm);
len = min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE);
memcpy(bcm->nick, extra, len);
bcm->nick[len] = '\0';
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_noirq(bcm);
return 0;
}
@@ -331,15 +329,14 @@ static int bcm43xx_wx_get_nick(struct net_device *net_dev,
char *extra)
{
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
- unsigned long flags;
size_t len;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_noirq(bcm);
len = strlen(bcm->nick) + 1;
memcpy(extra, bcm->nick, len);
data->data.length = (__u16)len;
data->data.flags = 1;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_noirq(bcm);
return 0;
}
@@ -353,7 +350,7 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
unsigned long flags;
int err = -EINVAL;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
if (data->rts.disabled) {
bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD;
err = 0;
@@ -364,7 +361,7 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
err = 0;
}
}
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err;
}
@@ -377,11 +374,11 @@ static int bcm43xx_wx_get_rts(struct net_device *net_dev,
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
unsigned long flags;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
data->rts.value = bcm->rts_threshold;
data->rts.fixed = 0;
data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD);
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return 0;
}
@@ -395,7 +392,7 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
unsigned long flags;
int err = -EINVAL;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
if (data->frag.disabled) {
bcm->ieee->fts = MAX_FRAG_THRESHOLD;
err = 0;
@@ -406,7 +403,7 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
err = 0;
}
}
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err;
}
@@ -419,11 +416,11 @@ static int bcm43xx_wx_get_frag(struct net_device *net_dev,
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
unsigned long flags;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
data->frag.value = bcm->ieee->fts;
data->frag.fixed = 0;
data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD);
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return 0;
}
@@ -445,8 +442,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- bcm43xx_lock_mmio(bcm, flags);
- if (!bcm->initialized)
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
goto out_unlock;
radio = bcm43xx_current_radio(bcm);
phy = bcm43xx_current_phy(bcm);
@@ -469,7 +466,7 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
err = 0;
out_unlock:
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err;
}
@@ -484,8 +481,8 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
unsigned long flags;
int err = -ENODEV;
- bcm43xx_lock(bcm, flags);
- if (!bcm->initialized)
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
goto out_unlock;
radio = bcm43xx_current_radio(bcm);
/* desired dBm value is in Q5.2 */
@@ -496,7 +493,7 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
err = 0;
out_unlock:
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err;
}
@@ -583,8 +580,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
return -EINVAL;
}
- bcm43xx_lock_mmio(bcm, flags);
- if (bcm->initialized) {
+ bcm43xx_lock_irqsafe(bcm, flags);
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
if (err) {
printk(KERN_ERR PFX "Interference Mitigation not "
@@ -598,7 +595,7 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
} else
bcm43xx_current_radio(bcm)->interfmode = mode;
}
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return err;
}
@@ -612,9 +609,9 @@ static int bcm43xx_wx_get_interfmode(struct net_device *net_dev,
unsigned long flags;
int mode;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
mode = bcm43xx_current_radio(bcm)->interfmode;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
switch (mode) {
case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -644,9 +641,9 @@ static int bcm43xx_wx_set_shortpreamble(struct net_device *net_dev,
int on;
on = *((int *)extra);
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
bcm->short_preamble = !!on;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return 0;
}
@@ -660,9 +657,9 @@ static int bcm43xx_wx_get_shortpreamble(struct net_device *net_dev,
unsigned long flags;
int on;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
on = bcm->short_preamble;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
if (on)
strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING);
@@ -684,11 +681,11 @@ static int bcm43xx_wx_set_swencryption(struct net_device *net_dev,
on = *((int *)extra);
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
bcm->ieee->host_encrypt = !!on;
bcm->ieee->host_decrypt = !!on;
bcm->ieee->host_build_iv = !on;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
return 0;
}
@@ -702,9 +699,9 @@ static int bcm43xx_wx_get_swencryption(struct net_device *net_dev,
unsigned long flags;
int on;
- bcm43xx_lock(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
on = bcm->ieee->host_encrypt;
- bcm43xx_unlock(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
if (on)
strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING);
@@ -767,11 +764,11 @@ static int bcm43xx_wx_sprom_read(struct net_device *net_dev,
if (!sprom)
goto out;
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
err = -ENODEV;
- if (bcm->initialized)
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
err = bcm43xx_sprom_read(bcm, sprom);
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
if (!err)
data->data.length = sprom2hex(sprom, extra);
kfree(sprom);
@@ -812,11 +809,11 @@ static int bcm43xx_wx_sprom_write(struct net_device *net_dev,
if (err)
goto out_kfree;
- bcm43xx_lock_mmio(bcm, flags);
+ bcm43xx_lock_irqsafe(bcm, flags);
err = -ENODEV;
- if (bcm->initialized)
+ if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
err = bcm43xx_sprom_write(bcm, sprom);
- bcm43xx_unlock_mmio(bcm, flags);
+ bcm43xx_unlock_irqsafe(bcm, flags);
out_kfree:
kfree(sprom);
out:
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 39f82f219749..081a8999666e 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -533,7 +533,7 @@ static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
}
-static inline void ipw_enable_interrupts(struct ipw_priv *priv)
+static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
{
if (priv->status & STATUS_INT_ENABLED)
return;
@@ -541,7 +541,7 @@ static inline void ipw_enable_interrupts(struct ipw_priv *priv)
ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
}
-static inline void ipw_disable_interrupts(struct ipw_priv *priv)
+static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
{
if (!(priv->status & STATUS_INT_ENABLED))
return;
@@ -549,6 +549,24 @@ static inline void ipw_disable_interrupts(struct ipw_priv *priv)
ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
}
+static inline void ipw_enable_interrupts(struct ipw_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ __ipw_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void ipw_disable_interrupts(struct ipw_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ __ipw_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
#ifdef CONFIG_IPW2200_DEBUG
static char *ipw_error_desc(u32 val)
{
@@ -1856,7 +1874,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
unsigned long flags;
int rc = 0;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->irq_lock, flags);
inta = ipw_read32(priv, IPW_INTA_RW);
inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
@@ -1865,6 +1883,10 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
/* Add any cached INTA values that need to be handled */
inta |= priv->isr_inta;
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
/* handle all the justifications for the interrupt */
if (inta & IPW_INTA_BIT_RX_TRANSFER) {
ipw_rx(priv);
@@ -1993,10 +2015,10 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
+
/* enable all interrupts */
ipw_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
@@ -10460,7 +10482,7 @@ static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
if (!priv)
return IRQ_NONE;
- spin_lock(&priv->lock);
+ spin_lock(&priv->irq_lock);
if (!(priv->status & STATUS_INT_ENABLED)) {
/* Shared IRQ */
@@ -10482,7 +10504,7 @@ static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
}
/* tell the device to stop sending interrupts */
- ipw_disable_interrupts(priv);
+ __ipw_disable_interrupts(priv);
/* ack current interrupts */
inta &= (IPW_INTA_MASK_ALL & inta_mask);
@@ -10493,11 +10515,11 @@ static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
tasklet_schedule(&priv->irq_tasklet);
- spin_unlock(&priv->lock);
+ spin_unlock(&priv->irq_lock);
return IRQ_HANDLED;
none:
- spin_unlock(&priv->lock);
+ spin_unlock(&priv->irq_lock);
return IRQ_NONE;
}
@@ -11477,6 +11499,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_IPW2200_DEBUG
ipw_debug_level = debug;
#endif
+ spin_lock_init(&priv->irq_lock);
spin_lock_init(&priv->lock);
for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 6044c0be2c80..ea12ad66b8e8 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1173,6 +1173,7 @@ struct ipw_priv {
struct ieee80211_device *ieee;
spinlock_t lock;
+ spinlock_t irq_lock;
struct mutex mutex;
/* basic pci-network driver stuff */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 879eb427607c..a915fe6c6aa5 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -924,8 +924,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (length < ETH_ZLEN)
{
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index dade4b903579..5b69befdab74 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -1695,8 +1695,8 @@ static int wv_frequency_list(unsigned long ioaddr, /* I/O port of the card */
/* Look in the table if the frequency is allowed */
if (table[9 - (freq / 16)] & (1 << (freq % 16))) {
/* Compute approximate channel number */
- while ((((channel_bands[c] >> 1) - 24) < freq) &&
- (c < NELS(channel_bands)))
+ while ((c < NELS(channel_bands)) &&
+ (((channel_bands[c] >> 1) - 24) < freq))
c++;
list[i].i = c; /* Set the list index */
@@ -2903,6 +2903,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
{
net_local *lp = (net_local *) dev->priv;
unsigned long flags;
+ char data[ETH_ZLEN];
#ifdef DEBUG_TX_TRACE
printk(KERN_DEBUG "%s: ->wavelan_packet_xmit(0x%X)\n", dev->name,
@@ -2937,15 +2938,16 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
* able to detect collisions, therefore in theory we don't really
* need to pad. Jean II */
if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
- return 0;
+ memset(data, 0, ETH_ZLEN);
+ memcpy(data, skb->data, skb->len);
+ /* Write packet on the card */
+ if(wv_packet_write(dev, data, ETH_ZLEN))
+ return 1; /* We failed */
}
-
- /* Write packet on the card */
- if(wv_packet_write(dev, skb->data, skb->len))
+ else if(wv_packet_write(dev, skb->data, skb->len))
return 1; /* We failed */
+
dev_kfree_skb(skb);
#ifdef DEBUG_TX_TRACE
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index f7724eb2fa7e..561250f73fd3 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -3194,11 +3194,8 @@ wavelan_packet_xmit(struct sk_buff * skb,
* and we don't have the Ethernet specific requirement of being
* able to detect collisions, therefore in theory we don't really
* need to pad. Jean II */
- if (skb->len < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
- return 0;
- }
+ if (skb_padto(skb, ETH_ZLEN))
+ return 0;
wv_packet_write(dev, skb->data, skb->len);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index fd0f43b7db5b..ecec8e5db786 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -862,13 +862,11 @@ static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Fix GX chipset errata. */
if (cacheline_end > 24 || cacheline_end == 0) {
len = skb->len + 32 - cacheline_end + 1;
- if (len != skb->len)
- skb = skb_padto(skb, len);
- }
- if (skb == NULL) {
- yp->tx_skbuff[entry] = NULL;
- netif_wake_queue(dev);
- return 0;
+ if (skb_padto(skb, len)) {
+ yp->tx_skbuff[entry] = NULL;
+ netif_wake_queue(dev);
+ return 0;
+ }
}
}
yp->tx_skbuff[entry] = skb;
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index 3ac047bc727d..a7c089df66e6 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -544,8 +544,7 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
if (length < ETH_ZLEN) {
- skb = skb_padto(skb, ETH_ZLEN);
- if (skb == NULL)
+ if (skb_padto(skb, ETH_ZLEN))
return 0;
length = ETH_ZLEN;
}
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b62da9b0cbf0..71c2da277d6e 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -272,10 +272,10 @@ static int oprofilefs_fill_super(struct super_block * sb, void * data, int silen
}
-static struct super_block *oprofilefs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int oprofilefs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, oprofilefs_fill_super);
+ return get_sb_single(fs_type, flags, data, oprofilefs_fill_super, mnt);
}
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 6c8452ede0e1..4d8dc27ea9d1 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -85,11 +85,6 @@ config PARPORT_PC_PCMCIA
config PARPORT_NOT_PC
bool
-config PARPORT_ARC
- tristate "Archimedes hardware"
- depends on ARM && PARPORT
- select PARPORT_NOT_PC
-
config PARPORT_IP32
tristate "SGI IP32 builtin port (EXPERIMENTAL)"
depends on SGI_IP32 && PARPORT && EXPERIMENTAL
diff --git a/drivers/parport/parport_arc.c b/drivers/parport/parport_arc.c
deleted file mode 100644
index b35bb4f48d62..000000000000
--- a/drivers/parport/parport_arc.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Low-level parallel port routines for Archimedes onboard hardware
- *
- * Author: Phil Blundell <philb@gnu.org>
- */
-
-/* This driver is for the parallel port hardware found on Acorn's old
- * range of Archimedes machines. The A5000 and newer systems have PC-style
- * I/O hardware and should use the parport_pc driver instead.
- *
- * The Acorn printer port hardware is very simple. There is a single 8-bit
- * write-only latch for the data port and control/status bits are handled
- * with various auxilliary input and output lines. The port is not
- * bidirectional, does not support any modes other than SPP, and has only
- * a subset of the standard printer control lines connected.
- */
-
-#include <linux/threads.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/parport.h>
-
-#include <asm/ptrace.h>
-#include <asm/io.h>
-#include <asm/arch/oldlatches.h>
-#include <asm/arch/irqs.h>
-
-#define DATA_ADDRESS 0x3350010
-
-/* This is equivalent to the above and only used for request_region. */
-#define PORT_BASE 0x80000000 | ((DATA_ADDRESS - IO_BASE) >> 2)
-
-/* The hardware can't read from the data latch, so we must use a soft
- copy. */
-static unsigned char data_copy;
-
-/* These are pretty simple. We know the irq is never shared and the
- kernel does all the magic that's required. */
-static void arc_enable_irq(struct parport *p)
-{
- enable_irq(p->irq);
-}
-
-static void arc_disable_irq(struct parport *p)
-{
- disable_irq(p->irq);
-}
-
-static void arc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- parport_generic_irq(irq, (struct parport *) dev_id, regs);
-}
-
-static void arc_write_data(struct parport *p, unsigned char data)
-{
- data_copy = data;
- outb_t(data, DATA_LATCH);
-}
-
-static unsigned char arc_read_data(struct parport *p)
-{
- return data_copy;
-}
-
-static struct parport_operations parport_arc_ops =
-{
- .write_data = arc_write_data,
- .read_data = arc_read_data,
-
- .write_control = arc_write_control,
- .read_control = arc_read_control,
- .frob_control = arc_frob_control,
-
- .read_status = arc_read_status,
-
- .enable_irq = arc_enable_irq,
- .disable_irq = arc_disable_irq,
-
- .data_forward = arc_data_forward,
- .data_reverse = arc_data_reverse,
-
- .init_state = arc_init_state,
- .save_state = arc_save_state,
- .restore_state = arc_restore_state,
-
- .epp_write_data = parport_ieee1284_epp_write_data,
- .epp_read_data = parport_ieee1284_epp_read_data,
- .epp_write_addr = parport_ieee1284_epp_write_addr,
- .epp_read_addr = parport_ieee1284_epp_read_addr,
-
- .ecp_write_data = parport_ieee1284_ecp_write_data,
- .ecp_read_data = parport_ieee1284_ecp_read_data,
- .ecp_write_addr = parport_ieee1284_ecp_write_addr,
-
- .compat_write_data = parport_ieee1284_write_compat,
- .nibble_read_data = parport_ieee1284_read_nibble,
- .byte_read_data = parport_ieee1284_read_byte,
-
- .owner = THIS_MODULE,
-};
-
-/* --- Initialisation code -------------------------------- */
-
-static int parport_arc_init(void)
-{
- /* Archimedes hardware provides only one port, at a fixed address */
- struct parport *p;
- struct resource res;
- char *fake_name = "parport probe");
-
- res = request_region(PORT_BASE, 1, fake_name);
- if (res == NULL)
- return 0;
-
- p = parport_register_port (PORT_BASE, IRQ_PRINTERACK,
- PARPORT_DMA_NONE, &parport_arc_ops);
-
- if (!p) {
- release_region(PORT_BASE, 1);
- return 0;
- }
-
- p->modes = PARPORT_MODE_ARCSPP;
- p->size = 1;
- rename_region(res, p->name);
-
- printk(KERN_INFO "%s: Archimedes on-board port, using irq %d\n",
- p->irq);
-
- /* Tell the high-level drivers about the port. */
- parport_announce_port (p);
-
- return 1;
-}
-
-module_init(parport_arc_init)
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 407b4eaddcbf..3a4a644c2686 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -36,13 +36,13 @@ static int irq_flags(int triggering, int polarity)
{
int flag;
if (triggering == ACPI_LEVEL_SENSITIVE) {
- if(polarity == ACPI_ACTIVE_LOW)
+ if (polarity == ACPI_ACTIVE_LOW)
flag = IORESOURCE_IRQ_LOWLEVEL;
else
flag = IORESOURCE_IRQ_HIGHLEVEL;
}
else {
- if(polarity == ACPI_ACTIVE_LOW)
+ if (polarity == ACPI_ACTIVE_LOW)
flag = IORESOURCE_IRQ_LOWEDGE;
else
flag = IORESOURCE_IRQ_HIGHEDGE;
@@ -57,7 +57,7 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_LOW;
break;
- case IORESOURCE_IRQ_HIGHLEVEL:
+ case IORESOURCE_IRQ_HIGHLEVEL:
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
@@ -73,7 +73,7 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
}
static void
-pnpacpi_parse_allocated_irqresource(struct pnp_resource_table * res, u32 gsi,
+pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi,
int triggering, int polarity)
{
int i = 0;
@@ -101,7 +101,7 @@ pnpacpi_parse_allocated_irqresource(struct pnp_resource_table * res, u32 gsi,
}
static void
-pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table * res, u32 dma)
+pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res, u32 dma)
{
int i = 0;
while (i < PNP_MAX_DMA &&
@@ -119,8 +119,8 @@ pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table * res, u32 dma)
}
static void
-pnpacpi_parse_allocated_ioresource(struct pnp_resource_table * res,
- u32 io, u32 len)
+pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
+ u64 io, u64 len)
{
int i = 0;
while (!(res->port_resource[i].flags & IORESOURCE_UNSET) &&
@@ -138,7 +138,7 @@ pnpacpi_parse_allocated_ioresource(struct pnp_resource_table * res,
}
static void
-pnpacpi_parse_allocated_memresource(struct pnp_resource_table * res,
+pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
u64 mem, u64 len)
{
int i = 0;
@@ -156,11 +156,32 @@ pnpacpi_parse_allocated_memresource(struct pnp_resource_table * res,
}
}
+static void
+pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table,
+ struct acpi_resource *res)
+{
+ struct acpi_resource_address64 addr, *p = &addr;
+ acpi_status status;
+
+ status = acpi_resource_to_address64(res, p);
+ if (!ACPI_SUCCESS(status)) {
+ pnp_warn("PnPACPI: failed to convert resource type %d",
+ res->type);
+ return;
+ }
+
+ if (p->resource_type == ACPI_MEMORY_RANGE)
+ pnpacpi_parse_allocated_memresource(res_table,
+ p->minimum, p->address_length);
+ else if (p->resource_type == ACPI_IO_RANGE)
+ pnpacpi_parse_allocated_ioresource(res_table,
+ p->minimum, p->address_length);
+}
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
- struct pnp_resource_table * res_table = (struct pnp_resource_table *)data;
+ struct pnp_resource_table *res_table = (struct pnp_resource_table *)data;
int i;
switch (res->type) {
@@ -221,19 +242,9 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
res->data.fixed_memory32.address_length);
break;
case ACPI_RESOURCE_TYPE_ADDRESS16:
- pnpacpi_parse_allocated_memresource(res_table,
- res->data.address16.minimum,
- res->data.address16.address_length);
- break;
case ACPI_RESOURCE_TYPE_ADDRESS32:
- pnpacpi_parse_allocated_memresource(res_table,
- res->data.address32.minimum,
- res->data.address32.address_length);
- break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
- pnpacpi_parse_allocated_memresource(res_table,
- res->data.address64.minimum,
- res->data.address64.address_length);
+ pnpacpi_parse_allocated_address_space(res_table, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
@@ -255,11 +266,11 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
pnp_warn("PnPACPI: unknown resource type %d", res->type);
return AE_ERROR;
}
-
+
return AE_OK;
}
-acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle, struct pnp_resource_table * res)
+acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle, struct pnp_resource_table *res)
{
/* Blank the resource table values */
pnp_init_resource_table(res);
@@ -317,17 +328,17 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_reso
pnp_err("Invalid DMA transfer type");
}
- pnp_register_dma_resource(option,dma);
+ pnp_register_dma_resource(option, dma);
return;
}
-
+
static void pnpacpi_parse_irq_option(struct pnp_option *option,
struct acpi_resource_irq *p)
{
int i;
- struct pnp_irq * irq;
-
+ struct pnp_irq *irq;
+
if (p->interrupt_count == 0)
return;
irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
@@ -347,7 +358,7 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
struct acpi_resource_extended_irq *p)
{
int i;
- struct pnp_irq * irq;
+ struct pnp_irq *irq;
if (p->interrupt_count == 0)
return;
@@ -368,7 +379,7 @@ static void
pnpacpi_parse_port_option(struct pnp_option *option,
struct acpi_resource_io *io)
{
- struct pnp_port * port;
+ struct pnp_port *port;
if (io->address_length == 0)
return;
@@ -381,7 +392,7 @@ pnpacpi_parse_port_option(struct pnp_option *option,
port->size = io->address_length;
port->flags = ACPI_DECODE_16 == io->io_decode ?
PNP_PORT_FLAG_16BITADDR : 0;
- pnp_register_port_resource(option,port);
+ pnp_register_port_resource(option, port);
return;
}
@@ -389,7 +400,7 @@ static void
pnpacpi_parse_fixed_port_option(struct pnp_option *option,
struct acpi_resource_fixed_io *io)
{
- struct pnp_port * port;
+ struct pnp_port *port;
if (io->address_length == 0)
return;
@@ -400,7 +411,7 @@ pnpacpi_parse_fixed_port_option(struct pnp_option *option,
port->size = io->address_length;
port->align = 0;
port->flags = PNP_PORT_FLAG_FIXED;
- pnp_register_port_resource(option,port);
+ pnp_register_port_resource(option, port);
return;
}
@@ -408,7 +419,7 @@ static void
pnpacpi_parse_mem24_option(struct pnp_option *option,
struct acpi_resource_memory24 *p)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
if (p->address_length == 0)
return;
@@ -423,7 +434,7 @@ pnpacpi_parse_mem24_option(struct pnp_option *option,
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
return;
}
@@ -431,7 +442,7 @@ static void
pnpacpi_parse_mem32_option(struct pnp_option *option,
struct acpi_resource_memory32 *p)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
if (p->address_length == 0)
return;
@@ -446,7 +457,7 @@ pnpacpi_parse_mem32_option(struct pnp_option *option,
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
return;
}
@@ -454,7 +465,7 @@ static void
pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
struct acpi_resource_fixed_memory32 *p)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
if (p->address_length == 0)
return;
@@ -468,7 +479,7 @@ pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
return;
}
@@ -477,8 +488,8 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
- struct pnp_mem * mem;
- struct pnp_port * port;
+ struct pnp_mem *mem;
+ struct pnp_port *port;
status = acpi_resource_to_address64(r, p);
if (!ACPI_SUCCESS(status)) {
@@ -498,7 +509,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
mem->align = 0;
mem->flags = (p->info.mem.write_protect ==
ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
} else if (p->resource_type == ACPI_IO_RANGE) {
port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
if (!port)
@@ -507,7 +518,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
port->size = p->address_length;
port->align = 0;
port->flags = PNP_PORT_FLAG_FIXED;
- pnp_register_port_resource(option,port);
+ pnp_register_port_resource(option, port);
}
}
@@ -531,7 +542,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
break;
case ACPI_RESOURCE_TYPE_DMA:
- pnpacpi_parse_dma_option(option, &res->data.dma);
+ pnpacpi_parse_dma_option(option, &res->data.dma);
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -539,7 +550,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
case ACPI_GOOD_CONFIGURATION:
priority = PNP_RES_PRIORITY_PREFERRED;
break;
-
+
case ACPI_ACCEPTABLE_CONFIGURATION:
priority = PNP_RES_PRIORITY_ACCEPTABLE;
break;
@@ -555,7 +566,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
option = pnp_register_dependent_option(dev, priority);
if (!option)
return AE_ERROR;
- parse_data->option = option;
+ parse_data->option = option;
break;
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
@@ -615,7 +626,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
pnp_warn("PnPACPI: unknown resource type %d", res->type);
return AE_ERROR;
}
-
+
return AE_OK;
}
@@ -636,13 +647,8 @@ acpi_status pnpacpi_parse_resource_option_data(acpi_handle handle,
return status;
}
-/*
- * Set resource
- */
-static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
- void *data)
+static int pnpacpi_supported_resource(struct acpi_resource *res)
{
- int *res_cnt = (int *)data;
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
case ACPI_RESOURCE_TYPE_DMA:
@@ -655,43 +661,32 @@ static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
- (*res_cnt) ++;
- case ACPI_RESOURCE_TYPE_START_DEPENDENT:
- case ACPI_RESOURCE_TYPE_END_DEPENDENT:
- case ACPI_RESOURCE_TYPE_VENDOR:
- case ACPI_RESOURCE_TYPE_END_TAG:
- case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
- default:
- return AE_OK;
+ return 1;
}
- return AE_OK;
+ return 0;
}
-static acpi_status pnpacpi_type_resources(struct acpi_resource *res,
+/*
+ * Set resource
+ */
+static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
void *data)
{
- struct acpi_resource **resource = (struct acpi_resource **)data;
- switch (res->type) {
- case ACPI_RESOURCE_TYPE_IRQ:
- case ACPI_RESOURCE_TYPE_DMA:
- case ACPI_RESOURCE_TYPE_IO:
- case ACPI_RESOURCE_TYPE_FIXED_IO:
- case ACPI_RESOURCE_TYPE_MEMORY24:
- case ACPI_RESOURCE_TYPE_MEMORY32:
- case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
- case ACPI_RESOURCE_TYPE_ADDRESS16:
- case ACPI_RESOURCE_TYPE_ADDRESS32:
- case ACPI_RESOURCE_TYPE_ADDRESS64:
- case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ int *res_cnt = (int *)data;
+
+ if (pnpacpi_supported_resource(res))
+ (*res_cnt)++;
+ return AE_OK;
+}
+
+static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
+{
+ struct acpi_resource **resource = (struct acpi_resource **)data;
+
+ if (pnpacpi_supported_resource(res)) {
(*resource)->type = res->type;
+ (*resource)->length = sizeof(struct acpi_resource);
(*resource)++;
- case ACPI_RESOURCE_TYPE_START_DEPENDENT:
- case ACPI_RESOURCE_TYPE_END_DEPENDENT:
- case ACPI_RESOURCE_TYPE_VENDOR:
- case ACPI_RESOURCE_TYPE_END_TAG:
- case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
- default:
- return AE_OK;
}
return AE_OK;
@@ -735,11 +730,8 @@ static void pnpacpi_encode_irq(struct acpi_resource *resource,
struct resource *p)
{
int triggering, polarity;
-
- decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering,
- &polarity);
- resource->type = ACPI_RESOURCE_TYPE_IRQ;
- resource->length = sizeof(struct acpi_resource);
+
+ decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
resource->data.irq.triggering = triggering;
resource->data.irq.polarity = polarity;
if (triggering == ACPI_EDGE_SENSITIVE)
@@ -754,11 +746,8 @@ static void pnpacpi_encode_ext_irq(struct acpi_resource *resource,
struct resource *p)
{
int triggering, polarity;
-
- decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering,
- &polarity);
- resource->type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ;
- resource->length = sizeof(struct acpi_resource);
+
+ decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
resource->data.extended_irq.producer_consumer = ACPI_CONSUMER;
resource->data.extended_irq.triggering = triggering;
resource->data.extended_irq.polarity = polarity;
@@ -773,8 +762,6 @@ static void pnpacpi_encode_ext_irq(struct acpi_resource *resource,
static void pnpacpi_encode_dma(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_DMA;
- resource->length = sizeof(struct acpi_resource);
/* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
if (p->flags & IORESOURCE_DMA_COMPATIBLE)
resource->data.dma.type = ACPI_COMPATIBILITY;
@@ -798,8 +785,6 @@ static void pnpacpi_encode_dma(struct acpi_resource *resource,
static void pnpacpi_encode_io(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_IO;
- resource->length = sizeof(struct acpi_resource);
/* Note: pnp_assign_port will copy pnp_port->flags into p->flags */
resource->data.io.io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR)?
ACPI_DECODE_16 : ACPI_DECODE_10;
@@ -812,8 +797,6 @@ static void pnpacpi_encode_io(struct acpi_resource *resource,
static void pnpacpi_encode_fixed_io(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_FIXED_IO;
- resource->length = sizeof(struct acpi_resource);
resource->data.fixed_io.address = p->start;
resource->data.fixed_io.address_length = p->end - p->start + 1;
}
@@ -821,8 +804,6 @@ static void pnpacpi_encode_fixed_io(struct acpi_resource *resource,
static void pnpacpi_encode_mem24(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_MEMORY24;
- resource->length = sizeof(struct acpi_resource);
/* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */
resource->data.memory24.write_protect =
(p->flags & IORESOURCE_MEM_WRITEABLE) ?
@@ -836,8 +817,6 @@ static void pnpacpi_encode_mem24(struct acpi_resource *resource,
static void pnpacpi_encode_mem32(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_MEMORY32;
- resource->length = sizeof(struct acpi_resource);
resource->data.memory32.write_protect =
(p->flags & IORESOURCE_MEM_WRITEABLE) ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
@@ -850,8 +829,6 @@ static void pnpacpi_encode_mem32(struct acpi_resource *resource,
static void pnpacpi_encode_fixed_mem32(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_FIXED_MEMORY32;
- resource->length = sizeof(struct acpi_resource);
resource->data.fixed_memory32.write_protect =
(p->flags & IORESOURCE_MEM_WRITEABLE) ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
@@ -882,37 +859,37 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
pnp_dbg("Encode dma");
pnpacpi_encode_dma(resource,
&res_table->dma_resource[dma]);
- dma ++;
+ dma++;
break;
case ACPI_RESOURCE_TYPE_IO:
pnp_dbg("Encode io");
pnpacpi_encode_io(resource,
&res_table->port_resource[port]);
- port ++;
+ port++;
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnp_dbg("Encode fixed io");
pnpacpi_encode_fixed_io(resource,
&res_table->port_resource[port]);
- port ++;
+ port++;
break;
case ACPI_RESOURCE_TYPE_MEMORY24:
pnp_dbg("Encode mem24");
pnpacpi_encode_mem24(resource,
&res_table->mem_resource[mem]);
- mem ++;
+ mem++;
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnp_dbg("Encode mem32");
pnpacpi_encode_mem32(resource,
&res_table->mem_resource[mem]);
- mem ++;
+ mem++;
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnp_dbg("Encode fixed mem32");
pnpacpi_encode_fixed_mem32(resource,
&res_table->mem_resource[mem]);
- mem ++;
+ mem++;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
pnp_dbg("Encode ext irq");
@@ -933,8 +910,8 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
pnp_warn("unknown resource type %d", resource->type);
return -EINVAL;
}
- resource ++;
- i ++;
+ resource++;
+ i++;
}
return 0;
}
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 0bab60a20309..38aad8321456 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -420,7 +420,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
}
tcph = eddp->skb->h.th;
while (eddp->skb_offset < eddp->skb->len) {
- data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
+ data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
(int)(eddp->skb->len - eddp->skb_offset));
/* prepare qdio hdr */
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
@@ -515,20 +515,20 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
QETH_DBF_TEXT(trace, 5, "eddpcanp");
/* can we put multiple skbs in one page? */
- skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
+ skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
if (skbs_per_page > 1){
- ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
+ ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
skbs_per_page + 1;
ctx->elements_per_skb = 1;
} else {
/* no -> how many elements per skb? */
- ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
+ ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
PAGE_SIZE) >> PAGE_SHIFT;
ctx->num_pages = ctx->elements_per_skb *
- (skb_shinfo(skb)->tso_segs + 1);
+ (skb_shinfo(skb)->gso_segs + 1);
}
ctx->num_elements = ctx->elements_per_skb *
- (skb_shinfo(skb)->tso_segs + 1);
+ (skb_shinfo(skb)->gso_segs + 1);
}
static inline struct qeth_eddp_context *
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 9e671a48cd2f..56009d768326 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -4417,7 +4417,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
struct qeth_eddp_context *ctx = NULL;
int tx_bytes = skb->len;
unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned short tso_size = skb_shinfo(skb)->tso_size;
+ unsigned short tso_size = skb_shinfo(skb)->gso_size;
int rc;
QETH_DBF_TEXT(trace, 6, "sendpkt");
@@ -4453,7 +4453,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
queue = card->qdio.out_qs
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
- if (skb_shinfo(skb)->tso_size)
+ if (skb_shinfo(skb)->gso_size)
large_send = card->options.large_send;
/*are we able to do TSO ? If so ,prepare and send it from here */
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
index 24ef40ca9562..593f298142c1 100644
--- a/drivers/s390/net/qeth_tso.h
+++ b/drivers/s390/net/qeth_tso.h
@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
hdr->ext.hdr_version = 1;
hdr->ext.hdr_len = 28;
/*insert non-fix values */
- hdr->ext.mss = skb_shinfo(skb)->tso_size;
+ hdr->ext.mss = skb_shinfo(skb)->gso_size;
hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index d8041952b1b5..7f22a06fe5ec 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -474,8 +474,7 @@ NCR_700_readl(struct Scsi_Host *host, __u32 reg)
ioread32(hostdata->base + reg);
#if 1
/* sanity check the register */
- if((reg & 0x3) != 0)
- BUG();
+ BUG_ON((reg & 0x3) != 0);
#endif
return value;
@@ -498,8 +497,7 @@ NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
#if 1
/* sanity check the register */
- if((reg & 0x3) != 0)
- BUG();
+ BUG_ON((reg & 0x3) != 0);
#endif
bEBus ? iowrite32be(value, hostdata->base + reg):
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index ea9e038813eb..83b5c7d085f2 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -396,8 +396,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
- if (fibptr == NULL)
- BUG();
+ BUG_ON(fibptr == NULL);
get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
/* Failure is irrelevant, using default value instead */
@@ -956,8 +955,7 @@ static void io_callback(void *context, struct fib * fibptr)
smp_processor_id(), (unsigned long long)lba, jiffies);
}
- if (fibptr == NULL)
- BUG();
+ BUG_ON(fibptr == NULL);
if(scsicmd->use_sg)
pci_unmap_sg(dev->pdev,
@@ -1092,8 +1090,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
aac_build_sgraw(scsicmd, &readcmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
- BUG();
+ BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
@@ -1261,8 +1258,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
aac_build_sgraw(scsicmd, &writecmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
- BUG();
+ BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
@@ -1904,8 +1900,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (fibptr == NULL)
- BUG();
+ BUG_ON(fibptr == NULL);
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d2ef17ea44fa..3f27419c66af 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -229,8 +229,7 @@ void aac_fib_init(struct fib *fibptr)
static void fib_dealloc(struct fib * fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib;
- if(hw_fib->header.StructType != FIB_MAGIC)
- BUG();
+ BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
hw_fib->header.XferState = 0;
}
@@ -530,8 +529,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
} else
down(&fibptr->event_wait);
- if(fibptr->done == 0)
- BUG();
+ BUG_ON(fibptr->done == 0);
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
return -ETIMEDOUT;
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index b10750bb5c09..8caa5903ce38 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -118,8 +118,7 @@ int __queue_add(Queue_t *queue, Scsi_Cmnd *SCpnt, int head)
list_del(l);
q = list_entry(l, QE_t, list);
- if (BAD_MAGIC(q, QUEUE_MAGIC_FREE))
- BUG();
+ BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE));
SET_MAGIC(q, QUEUE_MAGIC_USED);
q->SCpnt = SCpnt;
@@ -144,8 +143,7 @@ static Scsi_Cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
*/
list_del(ent);
q = list_entry(ent, QE_t, list);
- if (BAD_MAGIC(q, QUEUE_MAGIC_USED))
- BUG();
+ BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED));
SET_MAGIC(q, QUEUE_MAGIC_FREE);
list_add(ent, &queue->free);
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 0c6dc31bb14d..115f55471ed3 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2241,8 +2241,7 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
int host_index;
unsigned long imm_command;
- if (cmd == NULL)
- BUG();
+ BUG_ON(cmd == NULL);
ticks = IM_RESET_DELAY * HZ;
shpnt = cmd->device->host;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 39b760a24241..988e6f7af01a 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -600,8 +600,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
"issuing a packet command\n");
return ide_do_reset (drive);
}
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
/* Set the interrupt routine */
ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
/* Send the actual packet */
@@ -691,8 +690,7 @@ static ide_startstop_t idescsi_issue_pc (ide_drive_t *drive, idescsi_pc_t *pc)
set_bit(PC_DMA_OK, &pc->flags);
if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags)) {
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &idescsi_transfer_pc,
get_timeout(pc), idescsi_expiry);
/* Issue the packet command */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index c33857e7b33b..5d2cefb5e52d 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1828,7 +1828,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
scb->dma_type = MEGA_SGLIST;
- if( sgcnt > adapter->sglen ) BUG();
+ BUG_ON(sgcnt > adapter->sglen);
*len = 0;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index ce0ba3a174f9..4a2fed350d4e 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4724,7 +4724,7 @@ err_out:
/* Flush the tape buffer before close */
-static int os_scsi_tape_flush(struct file * filp)
+static int os_scsi_tape_flush(struct file * filp, fl_owner_t id)
{
int result = 0, result2;
struct osst_tape * STp = filp->private_data;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ad87d73f88ee..1272dd249af3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1193,7 +1193,7 @@ static int st_open(struct inode *inode, struct file *filp)
/* Flush the tape buffer before close */
-static int st_flush(struct file *filp)
+static int st_flush(struct file *filp, fl_owner_t id)
{
int result = 0, result2;
unsigned char cmd[MAX_COMMAND_SIZE];
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 9341703dee00..27307fe5a4c8 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -939,6 +939,7 @@ wd33c93_intr(struct Scsi_Host *instance)
DB(DB_INTR, printk("%02x", cmd->SCp.Status))
if (hostdata->level2 >= L2_BASIC) {
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
+ udelay(7);
hostdata->state = S_RUNNING_LEVEL2;
write_wd33c93(regs, WD_COMMAND_PHASE, 0x50);
write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
@@ -955,6 +956,7 @@ wd33c93_intr(struct Scsi_Host *instance)
msg = read_1_byte(regs);
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
+ udelay(7);
hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
@@ -1358,6 +1360,7 @@ wd33c93_intr(struct Scsi_Host *instance)
} else {
/* Verify this is a change to MSG_IN and read the message */
sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ udelay(7);
if (sr == (CSR_ABORT | PHS_MESS_IN) ||
sr == (CSR_UNEXP | PHS_MESS_IN) ||
sr == (CSR_SRV_REQ | PHS_MESS_IN)) {
@@ -1374,6 +1377,7 @@ wd33c93_intr(struct Scsi_Host *instance)
asr);
}
sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ udelay(7);
if (sr != CSR_MSGIN)
printk
("wd33c93: Not paused with ACK on RESEL (%02x)\n",
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 5ea778fc1caa..bef4a9622ed7 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -937,4 +937,23 @@ config SERIAL_SGI_IOC3
If you have an SGI Altix with an IOC3 serial card,
say Y or M. Otherwise, say N.
+config SERIAL_NETX
+ bool "NetX serial port support"
+ depends on ARM && ARCH_NETX
+ select SERIAL_CORE
+ help
+ If you have a machine based on a Hilscher NetX SoC you
+ can enable its onboard serial port by enabling this option.
+
+ To compile this driver as a module, choose M here: the
+ module will be called netx-serial.
+
+config SERIAL_NETX_CONSOLE
+ bool "Console on NetX serial port"
+ depends on SERIAL_NETX
+ select SERIAL_CORE_CONSOLE
+ help
+	  If you have enabled the serial port on the Hilscher NetX
+	  SoC you can make it the console by answering Y to this option.
+
endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 0a71bf68a03f..927faee0362e 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_AT91) += at91_serial.o
+obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 1631414000a2..e920d196d0b1 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -52,7 +52,7 @@
#include <asm/io.h>
-#define UART_NR 2
+#define UART_NR 8
#define SERIAL_AMBA_MAJOR 204
#define SERIAL_AMBA_MINOR 16
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index c620209d7b9a..717e47bbd784 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -2646,7 +2646,10 @@ static int ioc4_serial_remove_one(struct ioc4_driver_data *idd)
struct ioc4_port *port;
struct ioc4_soft *soft;
+ /* If serial driver did not attach, don't try to detach */
control = idd->idd_serial_data;
+ if (!control)
+ return 0;
for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
for (port_type = UART_PORT_MIN;
@@ -2778,6 +2781,12 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, idd->idd_pdev,
idd->idd_pci_id));
+ /* PCI-RT does not bring out serial connections.
+ * Do not attach to this particular IOC4.
+ */
+ if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
+ return 0;
+
/* request serial registers */
tmp_addr1 = idd->idd_bar0 + IOC4_SERIAL_OFFSET;
diff --git a/drivers/serial/netx-serial.c b/drivers/serial/netx-serial.c
new file mode 100644
index 000000000000..c1adc9e4b239
--- /dev/null
+++ b/drivers/serial/netx-serial.c
@@ -0,0 +1,749 @@
+/*
+ * drivers/serial/netx-serial.c
+ *
+ * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+
+#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/arch/netx-regs.h>
+
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_NX_MAJOR 204
+#define MINOR_START 170
+
+#ifdef CONFIG_SERIAL_NETX_CONSOLE
+
+enum uart_regs {
+ UART_DR = 0x00,
+ UART_SR = 0x04,
+ UART_LINE_CR = 0x08,
+ UART_BAUDDIV_MSB = 0x0c,
+ UART_BAUDDIV_LSB = 0x10,
+ UART_CR = 0x14,
+ UART_FR = 0x18,
+ UART_IIR = 0x1c,
+ UART_ILPR = 0x20,
+ UART_RTS_CR = 0x24,
+ UART_RTS_LEAD = 0x28,
+ UART_RTS_TRAIL = 0x2c,
+ UART_DRV_ENABLE = 0x30,
+ UART_BRM_CR = 0x34,
+ UART_RXFIFO_IRQLEVEL = 0x38,
+ UART_TXFIFO_IRQLEVEL = 0x3c,
+};
+
+#define SR_FE (1<<0)
+#define SR_PE (1<<1)
+#define SR_BE (1<<2)
+#define SR_OE (1<<3)
+
+#define LINE_CR_BRK (1<<0)
+#define LINE_CR_PEN (1<<1)
+#define LINE_CR_EPS (1<<2)
+#define LINE_CR_STP2 (1<<3)
+#define LINE_CR_FEN (1<<4)
+#define LINE_CR_5BIT (0<<5)
+#define LINE_CR_6BIT (1<<5)
+#define LINE_CR_7BIT (2<<5)
+#define LINE_CR_8BIT (3<<5)
+#define LINE_CR_BITS_MASK (3<<5)
+
+#define CR_UART_EN (1<<0)
+#define CR_SIREN (1<<1)
+#define CR_SIRLP (1<<2)
+#define CR_MSIE (1<<3)
+#define CR_RIE (1<<4)
+#define CR_TIE (1<<5)
+#define CR_RTIE (1<<6)
+#define CR_LBE (1<<7)
+
+#define FR_CTS (1<<0)
+#define FR_DSR (1<<1)
+#define FR_DCD (1<<2)
+#define FR_BUSY (1<<3)
+#define FR_RXFE (1<<4)
+#define FR_TXFF (1<<5)
+#define FR_RXFF (1<<6)
+#define FR_TXFE (1<<7)
+
+#define IIR_MIS (1<<0)
+#define IIR_RIS (1<<1)
+#define IIR_TIS (1<<2)
+#define IIR_RTIS (1<<3)
+#define IIR_MASK 0xf
+
+#define RTS_CR_AUTO (1<<0)
+#define RTS_CR_RTS (1<<1)
+#define RTS_CR_COUNT (1<<2)
+#define RTS_CR_MOD2 (1<<3)
+#define RTS_CR_RTS_POL (1<<4)
+#define RTS_CR_CTS_CTR (1<<5)
+#define RTS_CR_CTS_POL (1<<6)
+#define RTS_CR_STICK (1<<7)
+
+#define UART_PORT_SIZE 0x40
+#define DRIVER_NAME "netx-uart"
+
+struct netx_port {
+ struct uart_port port;
+};
+
+static void netx_stop_tx(struct uart_port *port)
+{
+ unsigned int val;
+ val = readl(port->membase + UART_CR);
+ writel(val & ~CR_TIE, port->membase + UART_CR);
+}
+
+static void netx_stop_rx(struct uart_port *port)
+{
+ unsigned int val;
+ val = readl(port->membase + UART_CR);
+ writel(val & ~CR_RIE, port->membase + UART_CR);
+}
+
+static void netx_enable_ms(struct uart_port *port)
+{
+ unsigned int val;
+ val = readl(port->membase + UART_CR);
+ writel(val | CR_MSIE, port->membase + UART_CR);
+}
+
+static inline void netx_transmit_buffer(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+
+ if (port->x_char) {
+ writel(port->x_char, port->membase + UART_DR);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
+ netx_stop_tx(port);
+ return;
+ }
+
+ do {
+ /* send xmit->buf[xmit->tail]
+ * out the port here */
+ writel(xmit->buf[xmit->tail], port->membase + UART_DR);
+ xmit->tail = (xmit->tail + 1) &
+ (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (!(readl(port->membase + UART_FR) & FR_TXFF));
+
+ if (uart_circ_empty(xmit))
+ netx_stop_tx(port);
+}
+
+static void netx_start_tx(struct uart_port *port)
+{
+ writel(
+ readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR);
+
+ if (!(readl(port->membase + UART_FR) & FR_TXFF))
+ netx_transmit_buffer(port);
+}
+
+static unsigned int netx_tx_empty(struct uart_port *port)
+{
+ return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT;
+}
+
+static void netx_txint(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ netx_stop_tx(port);
+ return;
+ }
+
+ netx_transmit_buffer(port);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void netx_rxint(struct uart_port *port, struct pt_regs *regs)
+{
+ unsigned char rx, flg, status;
+ struct tty_struct *tty = port->info->tty;
+
+ while (!(readl(port->membase + UART_FR) & FR_RXFE)) {
+ rx = readl(port->membase + UART_DR);
+ flg = TTY_NORMAL;
+ port->icount.rx++;
+ status = readl(port->membase + UART_SR);
+ if (status & SR_BE) {
+ writel(0, port->membase + UART_SR);
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (unlikely(status & (SR_FE | SR_PE | SR_OE))) {
+
+ if (status & SR_PE)
+ port->icount.parity++;
+ else if (status & SR_FE)
+ port->icount.frame++;
+ if (status & SR_OE)
+ port->icount.overrun++;
+
+ status &= port->read_status_mask;
+
+ if (status & SR_BE)
+ flg = TTY_BREAK;
+ else if (status & SR_PE)
+ flg = TTY_PARITY;
+ else if (status & SR_FE)
+ flg = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, rx, regs))
+ continue;
+
+ uart_insert_char(port, status, SR_OE, rx, flg);
+ }
+
+ tty_flip_buffer_push(tty);
+ return;
+}
+
+static irqreturn_t netx_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned long flags;
+ unsigned char status;
+
+ spin_lock_irqsave(&port->lock,flags);
+
+ status = readl(port->membase + UART_IIR) & IIR_MASK;
+ while (status) {
+ if (status & IIR_RIS)
+ netx_rxint(port, regs);
+ if (status & IIR_TIS)
+ netx_txint(port);
+ if (status & IIR_MIS) {
+ if (readl(port->membase + UART_FR) & FR_CTS)
+ uart_handle_cts_change(port, 1);
+ else
+ uart_handle_cts_change(port, 0);
+ }
+ writel(0, port->membase + UART_IIR);
+ status = readl(port->membase + UART_IIR) & IIR_MASK;
+ }
+
+ spin_unlock_irqrestore(&port->lock,flags);
+ return IRQ_HANDLED;
+}
+
+static unsigned int netx_get_mctrl(struct uart_port *port)
+{
+ unsigned int ret = TIOCM_DSR | TIOCM_CAR;
+
+ if (readl(port->membase + UART_FR) & FR_CTS)
+ ret |= TIOCM_CTS;
+
+ return ret;
+}
+
+static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int val;
+
+ if (mctrl & TIOCM_RTS) {
+ val = readl(port->membase + UART_RTS_CR);
+ writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR);
+ }
+}
+
+static void netx_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned int line_cr;
+ spin_lock_irq(&port->lock);
+
+ line_cr = readl(port->membase + UART_LINE_CR);
+ if (break_state != 0)
+ line_cr |= LINE_CR_BRK;
+ else
+ line_cr &= ~LINE_CR_BRK;
+ writel(line_cr, port->membase + UART_LINE_CR);
+
+ spin_unlock_irq(&port->lock);
+}
+
+static int netx_startup(struct uart_port *port)
+{
+ int ret;
+
+ ret = request_irq(port->irq, netx_int, 0,
+ DRIVER_NAME, port);
+ if (ret) {
+ dev_err(port->dev, "unable to grab irq%d\n",port->irq);
+ goto exit;
+ }
+
+ writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN,
+ port->membase + UART_LINE_CR);
+
+ writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN,
+ port->membase + UART_CR);
+
+exit:
+ return ret;
+}
+
+static void netx_shutdown(struct uart_port *port)
+{
+ writel(0, port->membase + UART_CR) ;
+
+ free_irq(port->irq, port);
+}
+
+static void
+netx_set_termios(struct uart_port *port, struct termios *termios,
+ struct termios *old)
+{
+ unsigned int baud, quot;
+ unsigned char old_cr;
+ unsigned char line_cr = LINE_CR_FEN;
+ unsigned char rts_cr = 0;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ line_cr |= LINE_CR_5BIT;
+ break;
+ case CS6:
+ line_cr |= LINE_CR_6BIT;
+ break;
+ case CS7:
+ line_cr |= LINE_CR_7BIT;
+ break;
+ case CS8:
+ line_cr |= LINE_CR_8BIT;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ line_cr |= LINE_CR_STP2;
+
+ if (termios->c_cflag & PARENB) {
+ line_cr |= LINE_CR_PEN;
+ if (!(termios->c_cflag & PARODD))
+ line_cr |= LINE_CR_EPS;
+ }
+
+ if (termios->c_cflag & CRTSCTS)
+ rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = baud * 4096;
+ quot /= 1000;
+ quot *= 256;
+ quot /= 100000;
+
+ spin_lock_irq(&port->lock);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ old_cr = readl(port->membase + UART_CR);
+
+ /* disable interrupts */
+ writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
+ port->membase + UART_CR);
+
+ /* drain transmitter */
+ while (readl(port->membase + UART_FR) & FR_BUSY);
+
+ /* disable UART */
+ writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);
+
+ /* modem status interrupts */
+ old_cr &= ~CR_MSIE;
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ old_cr |= CR_MSIE;
+
+ writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
+ writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
+ writel(line_cr, port->membase + UART_LINE_CR);
+
+ writel(rts_cr, port->membase + UART_RTS_CR);
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= SR_PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= SR_BE;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= SR_PE;
+ }
+
+ port->read_status_mask = 0;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SR_BE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= SR_PE | SR_FE;
+
+ writel(old_cr, port->membase + UART_CR);
+
+ spin_unlock_irq(&port->lock);
+}
+
+static const char *netx_type(struct uart_port *port)
+{
+ return port->type == PORT_NETX ? "NETX" : NULL;
+}
+
+static void netx_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, UART_PORT_SIZE);
+}
+
+static int netx_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase, UART_PORT_SIZE,
+ DRIVER_NAME) != NULL ? 0 : -EBUSY;
+}
+
+static void netx_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0)
+ port->type = PORT_NETX;
+}
+
+static int
+netx_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct uart_ops netx_pops = {
+ .tx_empty = netx_tx_empty,
+ .set_mctrl = netx_set_mctrl,
+ .get_mctrl = netx_get_mctrl,
+ .stop_tx = netx_stop_tx,
+ .start_tx = netx_start_tx,
+ .stop_rx = netx_stop_rx,
+ .enable_ms = netx_enable_ms,
+ .break_ctl = netx_break_ctl,
+ .startup = netx_startup,
+ .shutdown = netx_shutdown,
+ .set_termios = netx_set_termios,
+ .type = netx_type,
+ .release_port = netx_release_port,
+ .request_port = netx_request_port,
+ .config_port = netx_config_port,
+ .verify_port = netx_verify_port,
+};
+
+static struct netx_port netx_ports[] = {
+ {
+ .port = {
+ .type = PORT_NETX,
+ .iotype = UPIO_MEM,
+ .membase = (char __iomem *)io_p2v(NETX_PA_UART0),
+ .mapbase = NETX_PA_UART0,
+ .irq = NETX_IRQ_UART0,
+ .uartclk = 100000000,
+ .fifosize = 16,
+ .flags = UPF_BOOT_AUTOCONF,
+ .ops = &netx_pops,
+ .line = 0,
+ },
+ }, {
+ .port = {
+ .type = PORT_NETX,
+ .iotype = UPIO_MEM,
+ .membase = (char __iomem *)io_p2v(NETX_PA_UART1),
+ .mapbase = NETX_PA_UART1,
+ .irq = NETX_IRQ_UART1,
+ .uartclk = 100000000,
+ .fifosize = 16,
+ .flags = UPF_BOOT_AUTOCONF,
+ .ops = &netx_pops,
+ .line = 1,
+ },
+ }, {
+ .port = {
+ .type = PORT_NETX,
+ .iotype = UPIO_MEM,
+ .membase = (char __iomem *)io_p2v(NETX_PA_UART2),
+ .mapbase = NETX_PA_UART2,
+ .irq = NETX_IRQ_UART2,
+ .uartclk = 100000000,
+ .fifosize = 16,
+ .flags = UPF_BOOT_AUTOCONF,
+ .ops = &netx_pops,
+ .line = 2,
+ },
+ }
+};
+
+static void netx_console_putchar(struct uart_port *port, int ch)
+{
+ while (readl(port->membase + UART_FR) & FR_BUSY);
+ writel(ch, port->membase + UART_DR);
+}
+
+static void
+netx_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_port *port = &netx_ports[co->index].port;
+ unsigned char cr_save;
+
+ cr_save = readl(port->membase + UART_CR);
+ writel(cr_save | CR_UART_EN, port->membase + UART_CR);
+
+ uart_console_write(port, s, count, netx_console_putchar);
+
+ while (readl(port->membase + UART_FR) & FR_BUSY);
+ writel(cr_save, port->membase + UART_CR);
+}
+
+static void __init
+netx_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits, int *flow)
+{
+ unsigned char line_cr;
+
+ *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) |
+ readl(port->membase + UART_BAUDDIV_LSB);
+ *baud *= 1000;
+ *baud /= 4096;
+ *baud *= 1000;
+ *baud /= 256;
+ *baud *= 100;
+
+ line_cr = readl(port->membase + UART_LINE_CR);
+ *parity = 'n';
+ if (line_cr & LINE_CR_PEN) {
+ if (line_cr & LINE_CR_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ switch (line_cr & LINE_CR_BITS_MASK) {
+ case LINE_CR_8BIT:
+ *bits = 8;
+ break;
+ case LINE_CR_7BIT:
+ *bits = 7;
+ break;
+ case LINE_CR_6BIT:
+ *bits = 6;
+ break;
+ case LINE_CR_5BIT:
+ *bits = 5;
+ break;
+ }
+
+ if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO)
+ *flow = 'r';
+}
+
+static int __init
+netx_console_setup(struct console *co, char *options)
+{
+ struct netx_port *sport;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports))
+ co->index = 0;
+ sport = &netx_ports[co->index];
+
+ if (options) {
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ } else {
+ /* if the UART is enabled, assume it has been correctly setup
+ * by the bootloader and get the options
+ */
+ if (readl(sport->port.membase + UART_CR) & CR_UART_EN) {
+ netx_console_get_options(&sport->port, &baud,
+ &parity, &bits, &flow);
+ }
+
+ }
+
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver netx_reg;
+static struct console netx_console = {
+ .name = "ttyNX",
+ .write = netx_console_write,
+ .device = uart_console_device,
+ .setup = netx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &netx_reg,
+};
+
+static int __init netx_console_init(void)
+{
+ register_console(&netx_console);
+ return 0;
+}
+console_initcall(netx_console_init);
+
+#define NETX_CONSOLE &netx_console
+#else
+#define NETX_CONSOLE NULL
+#endif
+
+static struct uart_driver netx_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = "ttyNX",
+ .major = SERIAL_NX_MAJOR,
+ .minor = MINOR_START,
+ .nr = ARRAY_SIZE(netx_ports),
+ .cons = NETX_CONSOLE,
+};
+
+static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct netx_port *sport = platform_get_drvdata(pdev);
+
+ if (sport)
+ uart_suspend_port(&netx_reg, &sport->port);
+
+ return 0;
+}
+
+static int serial_netx_resume(struct platform_device *pdev)
+{
+ struct netx_port *sport = platform_get_drvdata(pdev);
+
+ if (sport)
+ uart_resume_port(&netx_reg, &sport->port);
+
+ return 0;
+}
+
+static int serial_netx_probe(struct platform_device *pdev)
+{
+ struct uart_port *port = &netx_ports[pdev->id].port;
+
+ dev_info(&pdev->dev, "initialising\n");
+
+ port->dev = &pdev->dev;
+
+ writel(1, port->membase + UART_RXFIFO_IRQLEVEL);
+ uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port);
+ platform_set_drvdata(pdev, &netx_ports[pdev->id]);
+
+ return 0;
+}
+
+static int serial_netx_remove(struct platform_device *pdev)
+{
+ struct netx_port *sport = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (sport)
+ uart_remove_one_port(&netx_reg, &sport->port);
+
+ return 0;
+}
+
+static struct platform_driver serial_netx_driver = {
+ .probe = serial_netx_probe,
+ .remove = serial_netx_remove,
+
+ .suspend = serial_netx_suspend,
+ .resume = serial_netx_resume,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init netx_serial_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: NetX driver\n");
+
+ ret = uart_register_driver(&netx_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&serial_netx_driver);
+ if (ret != 0)
+ uart_unregister_driver(&netx_reg);
+
+	return ret;
+}
+
+static void __exit netx_serial_exit(void)
+{
+ platform_driver_unregister(&serial_netx_driver);
+ uart_unregister_driver(&netx_reg);
+}
+
+module_init(netx_serial_init);
+module_exit(netx_serial_exit);
+
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_DESCRIPTION("NetX serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index 77d4568ccc3a..ae3649568541 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -269,7 +269,6 @@ static unsigned int serial_pxa_get_mctrl(struct uart_port *port)
unsigned char status;
unsigned int ret;
-return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
status = serial_in(up, UART_MSR);
ret = 0;
diff --git a/drivers/sn/ioc4.c b/drivers/sn/ioc4.c
index cdeff909403e..8256a97eb508 100644
--- a/drivers/sn/ioc4.c
+++ b/drivers/sn/ioc4.c
@@ -160,9 +160,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
writel(0, &idd->idd_misc_regs->int_out.raw);
mmiowb();
- printk(KERN_INFO
- "%s: Calibrating PCI bus speed "
- "for pci_dev %s ... ", __FUNCTION__, pci_name(idd->idd_pdev));
/* Set up square wave */
int_out.raw = 0;
int_out.fields.count = IOC4_CALIBRATE_COUNT;
@@ -206,11 +203,16 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
/* Bounds check the result. */
if (period > IOC4_CALIBRATE_LOW_LIMIT ||
period < IOC4_CALIBRATE_HIGH_LIMIT) {
- printk("failed. Assuming PCI clock ticks are %d ns.\n",
+ printk(KERN_INFO
+ "IOC4 %s: Clock calibration failed. Assuming"
+ "PCI clock is %d ns.\n",
+ pci_name(idd->idd_pdev),
IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
period = IOC4_CALIBRATE_DEFAULT;
} else {
- printk("succeeded. PCI clock ticks are %ld ns.\n",
+ printk(KERN_DEBUG
+ "IOC4 %s: PCI clock is %ld ns.\n",
+ pci_name(idd->idd_pdev),
period / IOC4_EXTINT_COUNT_DIVISOR);
}
@@ -222,6 +224,51 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
idd->count_period = period;
}
+/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
+ * Each brings out different combinations of IOC4 signals, thus
+ * the IOC4 subdrivers need to know which one we're attached to.
+ *
+ * We look for the presence of a SCSI (IO9) or SATA (IO10) controller
+ * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
+ * If neither is present, it's a PCI-RT.
+ */
+static unsigned int
+ioc4_variant(struct ioc4_driver_data *idd)
+{
+ struct pci_dev *pdev = NULL;
+ int found = 0;
+
+ /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
+ do {
+ pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
+ if (pdev &&
+ idd->idd_pdev->bus->number == pdev->bus->number &&
+ 3 == PCI_SLOT(pdev->devfn))
+ found = 1;
+ pci_dev_put(pdev);
+ } while (pdev && !found);
+ if (NULL != pdev)
+ return IOC4_VARIANT_IO9;
+
+ /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
+ pdev = NULL;
+ do {
+ pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
+ PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
+ if (pdev &&
+ idd->idd_pdev->bus->number == pdev->bus->number &&
+ 3 == PCI_SLOT(pdev->devfn))
+ found = 1;
+ pci_dev_put(pdev);
+ } while (pdev && !found);
+ if (NULL != pdev)
+ return IOC4_VARIANT_IO10;
+
+ /* PCI-RT: No SCSI/SATA controller will be present */
+ return IOC4_VARIANT_PCI_RT;
+}
+
/* Adds a new instance of an IOC4 card */
static int
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
@@ -286,6 +333,13 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* Failsafe portion of per-IOC4 initialization */
+ /* Detect card variant */
+ idd->idd_variant = ioc4_variant(idd);
+ printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
+ idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
+ idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
+ idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
+
/* Initialize IOC4 */
pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 3cf945cc5b9a..95f5ad923b0f 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -543,10 +543,10 @@ static void fs_remove_file (struct dentry *dentry)
/* --------------------------------------------------------------------- */
-static struct super_block *usb_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int usb_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, usbfs_fill_super);
+ return get_sb_single(fs_type, flags, data, usbfs_fill_super, mnt);
}
static struct file_system_type usb_fs_type = {
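
For orientation, here is a minimal sketch of the get_sb() contract that this and the following conversions move to, using a hypothetical "examplefs" (the name and its fill_super callback are placeholders, not anything from this patch): instead of returning a super_block pointer or ERR_PTR(), ->get_sb() now returns 0 or a negative errno and hands the superblock back through the vfsmount the caller passes in, either via the get_sb_single()/get_sb_nodev()/get_sb_bdev() helpers, which take the extra mnt argument, or by calling simple_set_mnt() directly as the afs and 9p changes below do.

	#include <linux/fs.h>

	/* examplefs_fill_super would be this filesystem's usual fill_super callback. */
	static int examplefs_fill_super(struct super_block *sb, void *data, int silent);

	static int examplefs_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
	{
		return get_sb_nodev(fs_type, flags, data, examplefs_fill_super, mnt);
	}
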
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 515310751303..fb488c8a860c 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -991,6 +991,8 @@ void usb_buffer_unmap_sg (struct usb_device *dev, unsigned pipe,
static int verify_suspended(struct device *dev, void *unused)
{
+ if (dev->driver == NULL)
+ return 0;
return (dev->power.power_state.event == PM_EVENT_ON) ? -EBUSY : 0;
}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 6f887478b148..a43dc908ac59 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1906,7 +1906,6 @@ static int fsync_sub(struct lun *curlun)
inode = filp->f_dentry->d_inode;
mutex_lock(&inode->i_mutex);
- current->flags |= PF_SYNCWRITE;
rc = filemap_fdatawrite(inode->i_mapping);
err = filp->f_op->fsync(filp, filp->f_dentry, 1);
if (!rc)
@@ -1914,7 +1913,6 @@ static int fsync_sub(struct lun *curlun)
err = filemap_fdatawait(inode->i_mapping);
if (!rc)
rc = err;
- current->flags &= ~PF_SYNCWRITE;
mutex_unlock(&inode->i_mutex);
VLDBG(curlun, "fdatasync -> %d\n", rc);
return rc;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index aef0722b8f17..3bdc5e3ba234 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2070,11 +2070,11 @@ enomem0:
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
-static struct super_block *
+static int
gadgetfs_get_sb (struct file_system_type *t, int flags,
- const char *path, void *opts)
+ const char *path, void *opts, struct vfsmount *mnt)
{
- return get_sb_single (t, flags, opts, gadgetfs_fill_super);
+ return get_sb_single (t, flags, opts, gadgetfs_fill_super, mnt);
}
static void
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index c060eb9b3b19..b93d71d28db7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -70,7 +70,6 @@ config USB_EHCI_TT_NEWSCHED
config USB_ISP116X_HCD
tristate "ISP116X HCD support"
depends on USB
- default N
---help---
The ISP1160 and ISP1161 chips are USB host controllers. Enable this
option if your board has this chip. If unsure, say N.
@@ -145,7 +144,6 @@ config USB_UHCI_HCD
config USB_SL811_HCD
tristate "SL811HS HCD support"
depends on USB
- default N
help
The SL811HS is a single-port USB controller that supports either
host side or peripheral side roles. Enable this option if your
@@ -158,7 +156,6 @@ config USB_SL811_HCD
config USB_SL811_CS
tristate "CF/PCMCIA support for SL811HS HCD"
depends on USB_SL811_HCD && PCMCIA
- default N
help
Wraps a PCMCIA driver around the SL811HS HCD, supporting the RATOC
REX-CFU1U CF card (often used with PDAs). If unsure, say N.
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index d9d7d3c4cae2..eb6aa42be60e 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -3722,7 +3722,9 @@ static int __init atyfb_init(void)
atyfb_setup(option);
#endif
+#ifdef CONFIG_PCI
pci_register_driver(&atyfb_driver);
+#endif
#ifdef CONFIG_ATARI
atyfb_atari_probe();
#endif
@@ -3731,7 +3733,9 @@ static int __init atyfb_init(void)
static void __exit atyfb_exit(void)
{
+#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
+#endif
}
module_init(atyfb_init);
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index f2d9a08e89ae..8b3d0f0c7bd5 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -2,6 +2,7 @@ menu "Dallas's 1-wire bus"
config W1
tristate "Dallas's 1-wire support"
+ depends on CONNECTOR
---help---
Dallas' 1-wire bus is useful to connect slow 1-pin devices
such as iButtons and thermal sensors.
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 61c599b4a1e3..872943004e59 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -99,12 +99,13 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
* @flags: mount flags
* @dev_name: device name that was mounted
* @data: mount options
+ * @mnt: mountpoint record to be instantiated
*
*/
-static struct super_block *v9fs_get_sb(struct file_system_type
- *fs_type, int flags,
- const char *dev_name, void *data)
+static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
struct super_block *sb = NULL;
struct v9fs_fcall *fcall = NULL;
@@ -123,17 +124,19 @@ static struct super_block *v9fs_get_sb(struct file_system_type
v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
if (!v9ses)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) {
dprintk(DEBUG_ERROR, "problem initiating session\n");
- sb = ERR_PTR(newfid);
+ retval = newfid;
goto out_free_session;
}
sb = sget(fs_type, NULL, v9fs_set_super, v9ses);
- if (IS_ERR(sb))
+ if (IS_ERR(sb)) {
+ retval = PTR_ERR(sb);
goto out_close_session;
+ }
v9fs_fill_super(sb, v9ses, flags);
inode = v9fs_get_inode(sb, S_IFDIR | mode);
@@ -184,19 +187,19 @@ static struct super_block *v9fs_get_sb(struct file_system_type
goto put_back_sb;
}
- return sb;
+ return simple_set_mnt(mnt, sb);
out_close_session:
v9fs_session_close(v9ses);
out_free_session:
kfree(v9ses);
- return sb;
+ return retval;
put_back_sb:
	/* deactivate_super calls v9fs_kill_super which will free the rest */
up_write(&sb->s_umount);
deactivate_super(sb);
- return ERR_PTR(retval);
+ return retval;
}
/**
diff --git a/fs/Kconfig b/fs/Kconfig
index 20f9b557732d..2aa4624cc018 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -53,7 +53,7 @@ config EXT2_FS_SECURITY
config EXT2_FS_XIP
bool "Ext2 execute in place support"
- depends on EXT2_FS
+ depends on EXT2_FS && MMU
help
Execute in place can be used on memory-backed block devices. If you
enable this option, you can select to mount block devices which are
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 252abda0d200..ba1c88af49fe 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -196,17 +196,17 @@ static int adfs_remount(struct super_block *sb, int *flags, char *data)
return parse_options(sb, data);
}
-static int adfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct adfs_sb_info *asb = ADFS_SB(sb);
+ struct adfs_sb_info *asb = ADFS_SB(dentry->d_sb);
buf->f_type = ADFS_SUPER_MAGIC;
buf->f_namelen = asb->s_namelen;
- buf->f_bsize = sb->s_blocksize;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_blocks = asb->s_size;
buf->f_files = asb->s_ids_per_zone * asb->s_map_size;
buf->f_bavail =
- buf->f_bfree = adfs_map_free(sb);
+ buf->f_bfree = adfs_map_free(dentry->d_sb);
buf->f_ffree = (long)(buf->f_bfree * buf->f_files) / (long)buf->f_blocks;
return 0;
@@ -470,10 +470,11 @@ error:
return -EINVAL;
}
-static struct super_block *adfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int adfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, adfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, adfs_fill_super,
+ mnt);
}
static struct file_system_type adfs_fs_type = {
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 4d7e5b19e5cd..8765cba35bb9 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -18,7 +18,7 @@
extern struct timezone sys_tz;
-static int affs_statfs(struct super_block *sb, struct kstatfs *buf);
+static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int affs_remount (struct super_block *sb, int *flags, char *data);
static void
@@ -508,8 +508,9 @@ affs_remount(struct super_block *sb, int *flags, char *data)
}
static int
-affs_statfs(struct super_block *sb, struct kstatfs *buf)
+affs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
int free;
pr_debug("AFFS: statfs() partsize=%d, reserved=%d\n",AFFS_SB(sb)->s_partition_size,
@@ -524,10 +525,11 @@ affs_statfs(struct super_block *sb, struct kstatfs *buf)
return 0;
}
-static struct super_block *affs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int affs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, affs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, affs_fill_super,
+ mnt);
}
static struct file_system_type affs_fs_type = {
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index a6dff6a4f204..2fc99877cb0d 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -185,9 +185,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
_enter("{%lu},%lu", dir->i_ino, index);
- page = read_cache_page(dir->i_mapping,index,
- (filler_t *) dir->i_mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(dir->i_mapping, index, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 4e6eeb59b83c..b5cf9e1205ad 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -63,7 +63,6 @@ unsigned long afs_mntpt_expiry_timeout = 20;
int afs_mntpt_check_symlink(struct afs_vnode *vnode)
{
struct page *page;
- filler_t *filler;
size_t size;
char *buf;
int ret;
@@ -71,10 +70,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
_enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
/* read the contents of the symlink into the pagecache */
- filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage;
-
- page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
- filler, NULL);
+ page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -160,7 +156,6 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
struct page *page = NULL;
size_t size;
char *buf, *devname = NULL, *options = NULL;
- filler_t *filler;
int ret;
kenter("{%s}", mntpt->d_name.name);
@@ -182,9 +177,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
goto error;
/* read the contents of the AFS special symlink */
- filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage;
-
- page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL);
+ page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto error;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 53c56e7231ab..82468df0ba54 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -38,9 +38,9 @@ struct afs_mount_params {
static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
unsigned long flags);
-static struct super_block *afs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data);
+static int afs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt);
static struct inode *afs_alloc_inode(struct super_block *sb);
@@ -294,10 +294,11 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
* get an AFS superblock
* - TODO: don't use get_sb_nodev(), but rather call sget() directly
*/
-static struct super_block *afs_get_sb(struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *options)
+static int afs_get_sb(struct file_system_type *fs_type,
+ int flags,
+ const char *dev_name,
+ void *options,
+ struct vfsmount *mnt)
{
struct afs_mount_params params;
struct super_block *sb;
@@ -311,7 +312,7 @@ static struct super_block *afs_get_sb(struct file_system_type *fs_type,
ret = afscm_start();
if (ret < 0) {
_leave(" = %d", ret);
- return ERR_PTR(ret);
+ return ret;
}
/* parse the options */
@@ -348,18 +349,19 @@ static struct super_block *afs_get_sb(struct file_system_type *fs_type,
goto error;
}
sb->s_flags |= MS_ACTIVE;
+ simple_set_mnt(mnt, sb);
afs_put_volume(params.volume);
afs_put_cell(params.default_cell);
- _leave(" = %p", sb);
- return sb;
+ _leave(" = 0 [%p]", 0, sb);
+ return 0;
error:
afs_put_volume(params.volume);
afs_put_cell(params.default_cell);
afscm_stop();
_leave(" = %d", ret);
- return ERR_PTR(ret);
+ return ret;
} /* end afs_get_sb() */
/*****************************************************************************/
diff --git a/fs/aio.c b/fs/aio.c
index e41e932ba489..8c34a62df7d7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -777,11 +777,11 @@ out:
static int __aio_run_iocbs(struct kioctx *ctx)
{
struct kiocb *iocb;
- LIST_HEAD(run_list);
+ struct list_head run_list;
assert_spin_locked(&ctx->ctx_lock);
- list_splice_init(&ctx->run_list, &run_list);
+ list_replace_init(&ctx->run_list, &run_list);
while (!list_empty(&run_list)) {
iocb = list_entry(run_list.next, struct kiocb,
ki_run_list);
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
index b977ece69f0c..aca123752406 100644
--- a/fs/autofs/init.c
+++ b/fs/autofs/init.c
@@ -14,10 +14,10 @@
#include <linux/init.h>
#include "autofs_i.h"
-static struct super_block *autofs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int autofs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, autofs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, autofs_fill_super, mnt);
}
static struct file_system_type autofs_fs_type = {
diff --git a/fs/autofs4/init.c b/fs/autofs4/init.c
index acecec8578ce..5d9193332bef 100644
--- a/fs/autofs4/init.c
+++ b/fs/autofs4/init.c
@@ -14,10 +14,10 @@
#include <linux/init.h>
#include "autofs_i.h"
-static struct super_block *autofs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int autofs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, autofs4_fill_super);
+ return get_sb_nodev(fs_type, flags, data, autofs4_fill_super, mnt);
}
static struct file_system_type autofs_fs_type = {
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 68ebd10f345d..08201fab26cd 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -49,7 +49,7 @@ static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static void befs_put_super(struct super_block *);
static int befs_remount(struct super_block *, int *, char *);
-static int befs_statfs(struct super_block *, struct kstatfs *);
+static int befs_statfs(struct dentry *, struct kstatfs *);
static int parse_options(char *, befs_mount_options *);
static const struct super_operations befs_sops = {
@@ -880,8 +880,9 @@ befs_remount(struct super_block *sb, int *flags, char *data)
}
static int
-befs_statfs(struct super_block *sb, struct kstatfs *buf)
+befs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
befs_debug(sb, "---> befs_statfs()");
@@ -899,11 +900,12 @@ befs_statfs(struct super_block *sb, struct kstatfs *buf)
return 0;
}
-static struct super_block *
+static int
befs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name,
- void *data)
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, befs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, befs_fill_super,
+ mnt);
}
static struct file_system_type befs_fs_type = {
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 55a7a78332f8..cf74f3d4d966 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -203,8 +203,9 @@ static void bfs_put_super(struct super_block *s)
s->s_fs_info = NULL;
}
-static int bfs_statfs(struct super_block *s, struct kstatfs *buf)
+static int bfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *s = dentry->d_sb;
struct bfs_sb_info *info = BFS_SB(s);
u64 id = huge_encode_dev(s->s_bdev->bd_dev);
buf->f_type = BFS_MAGIC;
@@ -410,10 +411,10 @@ out:
return -EINVAL;
}
-static struct super_block *bfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, bfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, bfs_fill_super, mnt);
}
static struct file_system_type bfs_fs_type = {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8a04216e8b4d..d0434406eaeb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -38,15 +38,13 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
-
+#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
-#include <linux/elf.h>
-
-static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
-static int load_elf_library(struct file*);
+static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
+static int load_elf_library(struct file *);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
@@ -59,15 +57,15 @@ extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
* don't even try.
*/
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
+static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
-# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
-# define ELF_MIN_ALIGN PAGE_SIZE
+#define ELF_MIN_ALIGN PAGE_SIZE
#endif
#ifndef ELF_CORE_EFLAGS
@@ -86,7 +84,7 @@ static struct linux_binfmt elf_format = {
.min_coredump = ELF_EXEC_PAGESIZE
};
-#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
+#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
@@ -104,13 +102,11 @@ static int set_brk(unsigned long start, unsigned long end)
return 0;
}
-
/* We need to explicitly zero any fractional pages
after the data section (i.e. bss). This would
contain the junk from the file that should not
- be in memory */
-
-
+ be in memory
+ */
static int padzero(unsigned long elf_bss)
{
unsigned long nbyte;
@@ -129,7 +125,9 @@ static int padzero(unsigned long elf_bss)
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
-#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
+#define STACK_ALLOC(sp, len) ({ \
+ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
+ old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
@@ -138,7 +136,7 @@ static int padzero(unsigned long elf_bss)
#endif
static int
-create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
+create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
int interp_aout, unsigned long load_addr,
unsigned long interp_load_addr)
{
@@ -161,7 +159,6 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
* for userspace to get any other way, in others (i386) it is
* merely difficult.
*/
-
u_platform = NULL;
if (k_platform) {
size_t len = strlen(k_platform) + 1;
@@ -171,7 +168,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
* evictions by the processes running on the same package. One
* thing we can do is to shuffle the initial stack for them.
*/
-
+
p = arch_align_stack(p);
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
@@ -180,9 +177,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
}
/* Create the ELF interpreter info */
- elf_info = (elf_addr_t *) current->mm->saved_auxv;
+ elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
- do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
+ do { \
+ elf_info[ei_index++] = id; \
+ elf_info[ei_index++] = val; \
+ } while (0)
#ifdef ARCH_DLINFO
/*
@@ -195,21 +195,22 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
- NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
+ NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
NEW_AUX_ENT(AT_FLAGS, 0);
NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
- NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
- NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
- NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
- NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
- NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
+ NEW_AUX_ENT(AT_UID, tsk->uid);
+ NEW_AUX_ENT(AT_EUID, tsk->euid);
+ NEW_AUX_ENT(AT_GID, tsk->gid);
+ NEW_AUX_ENT(AT_EGID, tsk->egid);
+ NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
if (k_platform) {
- NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
+ NEW_AUX_ENT(AT_PLATFORM,
+ (elf_addr_t)(unsigned long)u_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
- NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
+ NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
}
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
@@ -232,7 +233,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
sp = (elf_addr_t __user *)bprm->p - items - ei_index;
- bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
+ bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
sp = (elf_addr_t __user *)bprm->p;
#endif
@@ -285,7 +286,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
#ifndef elf_map
static unsigned long elf_map(struct file *filep, unsigned long addr,
- struct elf_phdr *eppnt, int prot, int type)
+ struct elf_phdr *eppnt, int prot, int type)
{
unsigned long map_addr;
unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
@@ -310,9 +311,8 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
is only provided so that we can read a.out libraries that have
an ELF header */
-static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
- struct file * interpreter,
- unsigned long *interp_load_addr)
+static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ struct file *interpreter, unsigned long *interp_load_addr)
{
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
@@ -342,15 +342,15 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
goto out;
/* Now read in all of the header information */
-
size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
if (size > ELF_MIN_ALIGN)
goto out;
- elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+ elf_phdata = kmalloc(size, GFP_KERNEL);
if (!elf_phdata)
goto out;
- retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
+ retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
+			     (char *)elf_phdata, size);
error = -EIO;
if (retval != size) {
if (retval < 0)
@@ -359,58 +359,65 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
}
eppnt = elf_phdata;
- for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
- if (eppnt->p_type == PT_LOAD) {
- int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
- int elf_prot = 0;
- unsigned long vaddr = 0;
- unsigned long k, map_addr;
-
- if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
- if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
- if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
- vaddr = eppnt->p_vaddr;
- if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
- elf_type |= MAP_FIXED;
-
- map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
- error = map_addr;
- if (BAD_ADDR(map_addr))
- goto out_close;
-
- if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
- load_addr = map_addr - ELF_PAGESTART(vaddr);
- load_addr_set = 1;
- }
-
- /*
- * Check to see if the section's size will overflow the
- * allowed task size. Note that p_filesz must always be
- * <= p_memsize so it is only necessary to check p_memsz.
- */
- k = load_addr + eppnt->p_vaddr;
- if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
- eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
- error = -ENOMEM;
- goto out_close;
- }
-
- /*
- * Find the end of the file mapping for this phdr, and keep
- * track of the largest address we see for this.
- */
- k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
- if (k > elf_bss)
- elf_bss = k;
-
- /*
- * Do the same thing for the memory mapping - between
- * elf_bss and last_bss is the bss section.
- */
- k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
- if (k > last_bss)
- last_bss = k;
- }
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+ int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
+ int elf_prot = 0;
+ unsigned long vaddr = 0;
+ unsigned long k, map_addr;
+
+ if (eppnt->p_flags & PF_R)
+ elf_prot = PROT_READ;
+ if (eppnt->p_flags & PF_W)
+ elf_prot |= PROT_WRITE;
+ if (eppnt->p_flags & PF_X)
+ elf_prot |= PROT_EXEC;
+ vaddr = eppnt->p_vaddr;
+ if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
+ elf_type |= MAP_FIXED;
+
+ map_addr = elf_map(interpreter, load_addr + vaddr,
+ eppnt, elf_prot, elf_type);
+ error = map_addr;
+ if (BAD_ADDR(map_addr))
+ goto out_close;
+
+ if (!load_addr_set &&
+ interp_elf_ex->e_type == ET_DYN) {
+ load_addr = map_addr - ELF_PAGESTART(vaddr);
+ load_addr_set = 1;
+ }
+
+ /*
+ * Check to see if the section's size will overflow the
+ * allowed task size. Note that p_filesz must always be
+			 * <= p_memsz so it's only necessary to check p_memsz.
+ */
+ k = load_addr + eppnt->p_vaddr;
+ if (k > TASK_SIZE ||
+ eppnt->p_filesz > eppnt->p_memsz ||
+ eppnt->p_memsz > TASK_SIZE ||
+ TASK_SIZE - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+
+ /*
+ * Find the end of the file mapping for this phdr, and
+ * keep track of the largest address we see for this.
+ */
+ k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
+ if (k > elf_bss)
+ elf_bss = k;
+
+ /*
+ * Do the same thing for the memory mapping - between
+ * elf_bss and last_bss is the bss section.
+ */
+ k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+ if (k > last_bss)
+ last_bss = k;
+ }
}
/*
@@ -424,7 +431,8 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
goto out_close;
}
- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
+ /* What we have mapped so far */
+ elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
/* Map the last of the bss segment */
if (last_bss > elf_bss) {
@@ -436,7 +444,7 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
}
*interp_load_addr = load_addr;
- error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
+ error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;
out_close:
kfree(elf_phdata);
@@ -444,8 +452,8 @@ out:
return error;
}
-static unsigned long load_aout_interp(struct exec * interp_ex,
- struct file * interpreter)
+static unsigned long load_aout_interp(struct exec *interp_ex,
+ struct file *interpreter)
{
unsigned long text_data, elf_entry = ~0UL;
char __user * addr;
@@ -464,7 +472,7 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
case ZMAGIC:
case QMAGIC:
offset = N_TXTOFF(*interp_ex);
- addr = (char __user *) N_TXTADDR(*interp_ex);
+ addr = (char __user *)N_TXTADDR(*interp_ex);
break;
default:
goto out;
@@ -480,7 +488,6 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
flush_icache_range((unsigned long)addr,
(unsigned long)addr + text_data);
-
down_write(&current->mm->mmap_sem);
do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
interp_ex->a_bss);
@@ -519,7 +526,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
#endif
}
-static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
struct file *interpreter = NULL; /* to shut gcc up */
unsigned long load_addr = 0, load_bias = 0;
@@ -528,7 +535,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
unsigned int interpreter_type = INTERPRETER_NONE;
unsigned char ibcs2_interpreter = 0;
unsigned long error;
- struct elf_phdr * elf_ppnt, *elf_phdata;
+ struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int elf_exec_fileno;
int retval, i;
@@ -553,7 +560,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
}
/* Get the exec-header */
- loc->elf_ex = *((struct elfhdr *) bprm->buf);
+ loc->elf_ex = *((struct elfhdr *)bprm->buf);
retval = -ENOEXEC;
/* First of all, some simple consistency checks */
@@ -568,7 +575,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto out;
/* Now read in all of the header information */
-
if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
goto out;
if (loc->elf_ex.e_phnum < 1 ||
@@ -576,18 +582,19 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto out;
size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
retval = -ENOMEM;
- elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+ elf_phdata = kmalloc(size, GFP_KERNEL);
if (!elf_phdata)
goto out;
- retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
+ retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
+ (char *)elf_phdata, size);
if (retval != size) {
if (retval >= 0)
retval = -EIO;
goto out_free_ph;
}
- files = current->files; /* Refcounted so ok */
+ files = current->files; /* Refcounted so ok */
retval = unshare_files();
if (retval < 0)
goto out_free_ph;
@@ -598,7 +605,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* exec will make our files private anyway, but for the a.out
loader stuff we need to do it earlier */
-
retval = get_unused_fd();
if (retval < 0)
goto out_free_fh;
@@ -620,7 +626,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
* shared libraries - for now assume that this
* is an a.out format binary
*/
-
retval = -ENOEXEC;
if (elf_ppnt->p_filesz > PATH_MAX ||
elf_ppnt->p_filesz < 2)
@@ -628,13 +633,13 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
retval = -ENOMEM;
elf_interpreter = kmalloc(elf_ppnt->p_filesz,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!elf_interpreter)
goto out_free_file;
retval = kernel_read(bprm->file, elf_ppnt->p_offset,
- elf_interpreter,
- elf_ppnt->p_filesz);
+ elf_interpreter,
+ elf_ppnt->p_filesz);
if (retval != elf_ppnt->p_filesz) {
if (retval >= 0)
retval = -EIO;
@@ -678,7 +683,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
goto out_free_interp;
- retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
+ retval = kernel_read(interpreter, 0, bprm->buf,
+ BINPRM_BUF_SIZE);
if (retval != BINPRM_BUF_SIZE) {
if (retval >= 0)
retval = -EIO;
@@ -686,8 +692,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
}
/* Get the exec headers */
- loc->interp_ex = *((struct exec *) bprm->buf);
- loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
+ loc->interp_ex = *((struct exec *)bprm->buf);
+ loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
break;
}
elf_ppnt++;
@@ -739,7 +745,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* OK, we are done with that, now set up the arg stuff,
and then start this sucker up */
-
if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
char *passed_p = passed_fileno;
sprintf(passed_fileno, "%d", elf_exec_fileno);
@@ -777,7 +782,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
- if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
arch_pick_mmap_layout(current->mm);
@@ -798,8 +803,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
the correct location in memory. At this point, we assume that
the image should be loaded at fixed address, not at a variable
address. */
-
- for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ for (i = 0, elf_ppnt = elf_phdata;
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
@@ -827,30 +832,35 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
load_bias, nbyte)) {
/*
* This bss-zeroing can fail if the ELF
- * file specifies odd protections. So
+ * file specifies odd protections. So
* we don't check the return value
*/
}
}
}
- if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
- if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
- if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+ if (elf_ppnt->p_flags & PF_R)
+ elf_prot |= PROT_READ;
+ if (elf_ppnt->p_flags & PF_W)
+ elf_prot |= PROT_WRITE;
+ if (elf_ppnt->p_flags & PF_X)
+ elf_prot |= PROT_EXEC;
- elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
+ elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
- /* Try and get dynamic programs out of the way of the default mmap
- base, as well as whatever program they might try to exec. This
- is because the brk will follow the loader, and is not movable. */
+ /* Try and get dynamic programs out of the way of the
+ * default mmap base, as well as whatever program they
+ * might try to exec. This is because the brk will
+ * follow the loader, and is not movable. */
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
}
- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+ elf_prot, elf_flags);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
@@ -867,8 +877,10 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
}
}
k = elf_ppnt->p_vaddr;
- if (k < start_code) start_code = k;
- if (start_data < k) start_data = k;
+ if (k < start_code)
+ start_code = k;
+ if (start_data < k)
+ start_data = k;
/*
* Check to see if the section's size will overflow the
@@ -878,7 +890,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
- /* set_brk can never work. Avoid overflows. */
+ /* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
@@ -966,8 +978,9 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
- create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
- load_addr, interp_load_addr);
+ create_elf_tables(bprm, &loc->elf_ex,
+ (interpreter_type == INTERPRETER_AOUT),
+ load_addr, interp_load_addr);
/* N.B. passed_fileno might not be initialized? */
if (interpreter_type == INTERPRETER_AOUT)
current->mm->arg_start += strlen(passed_fileno) + 1;
@@ -981,7 +994,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
- emulate the SVr4 behavior. Sigh. */
+ emulate the SVr4 behavior. Sigh. */
down_write(&current->mm->mmap_sem);
error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
@@ -1036,7 +1049,6 @@ out_free_ph:
/* This is really simpleminded and specialized - we are loading an
a.out library that is given an ELF header. */
-
static int load_elf_library(struct file *file)
{
struct elf_phdr *elf_phdata;
@@ -1046,7 +1058,7 @@ static int load_elf_library(struct file *file)
struct elfhdr elf_ex;
error = -ENOEXEC;
- retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
+ retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
if (retval != sizeof(elf_ex))
goto out;
@@ -1055,7 +1067,7 @@ static int load_elf_library(struct file *file)
/* First of all, some simple consistency checks */
if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
- !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
+ !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
goto out;
/* Now read in all of the header information */
@@ -1103,7 +1115,8 @@ static int load_elf_library(struct file *file)
goto out_free_ph;
}
- len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
+ len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
+ ELF_MIN_ALIGN - 1);
bss = eppnt->p_memsz + eppnt->p_vaddr;
if (bss > len) {
down_write(&current->mm->mmap_sem);
@@ -1162,7 +1175,7 @@ static int maydump(struct vm_area_struct *vma)
if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
- /* Dump shared memory only if mapped from an anonymous file. */
+ /* Dump shared memory only if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED)
return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
@@ -1173,7 +1186,7 @@ static int maydump(struct vm_area_struct *vma)
return 1;
}
-#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
/* An ELF note in memory */
struct memelfnote
@@ -1276,11 +1289,11 @@ static void fill_note(struct memelfnote *note, const char *name, int type,
}
/*
- * fill up all the fields in prstatus from the given task struct, except registers
- * which need to be filled up separately.
+ * fill up all the fields in prstatus from the given task struct, except
+ * registers which need to be filled up separately.
*/
static void fill_prstatus(struct elf_prstatus *prstatus,
- struct task_struct *p, long signr)
+ struct task_struct *p, long signr)
{
prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
prstatus->pr_sigpend = p->pending.signal.sig[0];
@@ -1365,8 +1378,8 @@ struct elf_thread_status
/*
* In order to add the specific thread information for the elf file format,
- * we need to keep a linked list of every threads pr_status and then
- * create a single section for them in the final core file.
+ * we need to keep a linked list of every thread's pr_status and then create
+ * a single section for them in the final core file.
*/
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
@@ -1377,19 +1390,23 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
fill_prstatus(&t->prstatus, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
+ fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
+ &(t->prstatus));
t->num_notes++;
sz += notesize(&t->notes[0]);
- if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
+ if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
+ &t->fpu))) {
+ fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
+ &(t->fpu));
t->num_notes++;
sz += notesize(&t->notes[1]);
}
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
- fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
+ fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
+ &t->xfpu);
t->num_notes++;
sz += notesize(&t->notes[2]);
}
@@ -1404,7 +1421,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
* and then they are actually written out. If we run out of core limit
* we just truncate.
*/
-static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
+static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define NUM_NOTES 6
int has_dumped = 0;
@@ -1433,12 +1450,12 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
/*
* We no longer stop all VM operations.
*
- * This is because those proceses that could possibly change map_count or
- * the mmap / vma pages are now blocked in do_exit on current finishing
- * this core dump.
+ * This is because those processes that could possibly change map_count
+ * or the mmap / vma pages are now blocked in do_exit on current
+ * finishing this core dump.
*
* Only ptrace can touch these memory addresses, but it doesn't change
- * the map_count or the pages allocated. So no possibility of crashing
+ * the map_count or the pages allocated. So no possibility of crashing
* exists while dumping the mm->vm_next areas to the core file.
*/
@@ -1500,7 +1517,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
#endif
/* Set up header */
- fill_elf_header(elf, segs+1); /* including notes section */
+ fill_elf_header(elf, segs + 1); /* including notes section */
has_dumped = 1;
current->flags |= PF_DUMPCORE;
@@ -1510,24 +1527,24 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
* with info from their /proc.
*/
- fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
-
+ fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
fill_psinfo(psinfo, current->group_leader, current->mm);
- fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+ fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
numnote = 2;
- auxv = (elf_addr_t *) current->mm->saved_auxv;
+ auxv = (elf_addr_t *)current->mm->saved_auxv;
i = 0;
do
i += 2;
while (auxv[i - 2] != AT_NULL);
fill_note(&notes[numnote++], "CORE", NT_AUXV,
- i * sizeof (elf_addr_t), auxv);
+ i * sizeof(elf_addr_t), auxv);
/* Try to dump the FPU. */
- if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
+ if ((prstatus->pr_fpvalid =
+ elf_core_copy_task_fpregs(current, regs, fpu)))
fill_note(notes + numnote++,
"CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
@@ -1576,8 +1593,10 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
phdr.p_memsz = sz;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
- if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
- if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
+ if (vma->vm_flags & VM_WRITE)
+ phdr.p_flags |= PF_W;
+ if (vma->vm_flags & VM_EXEC)
+ phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
DUMP_WRITE(&phdr, sizeof(phdr));
@@ -1594,7 +1613,9 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
/* write out the thread status notes section */
list_for_each(t, &thread_list) {
- struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
+ struct elf_thread_status *tmp =
+ list_entry(t, struct elf_thread_status, list);
+
for (i = 0; i < tmp->num_notes; i++)
if (!writenote(&tmp->notes[i], file))
goto end_coredump;
@@ -1611,18 +1632,19 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
for (addr = vma->vm_start;
addr < vma->vm_end;
addr += PAGE_SIZE) {
- struct page* page;
+ struct page *page;
struct vm_area_struct *vma;
if (get_user_pages(current, current->mm, addr, 1, 0, 1,
&page, &vma) <= 0) {
- DUMP_SEEK (file->f_pos + PAGE_SIZE);
+ DUMP_SEEK(file->f_pos + PAGE_SIZE);
} else {
if (page == ZERO_PAGE(addr)) {
- DUMP_SEEK (file->f_pos + PAGE_SIZE);
+ DUMP_SEEK(file->f_pos + PAGE_SIZE);
} else {
void *kaddr;
- flush_cache_page(vma, addr, page_to_pfn(page));
+ flush_cache_page(vma, addr,
+ page_to_pfn(page));
kaddr = kmap(page);
if ((size += PAGE_SIZE) > limit ||
!dump_write(file, kaddr,
@@ -1644,7 +1666,8 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
if ((off_t)file->f_pos != offset) {
/* Sanity check */
- printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
+ printk(KERN_WARNING
+ "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
(off_t)file->f_pos, offset);
}
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index a2e48c999c24..eba4e23b9ca0 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -435,9 +435,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
struct elf_fdpic_params *interp_params)
{
unsigned long sp, csp, nitems;
- elf_caddr_t *argv, *envp;
+ elf_caddr_t __user *argv, *envp;
size_t platform_len = 0, len;
- char *k_platform, *u_platform, *p;
+ char *k_platform;
+ char __user *u_platform, *p;
long hwcap;
int loop;
@@ -462,12 +463,11 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
if (k_platform) {
platform_len = strlen(k_platform) + 1;
sp -= platform_len;
+ u_platform = (char __user *) sp;
if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
return -EFAULT;
}
- u_platform = (char *) sp;
-
#if defined(__i386__) && defined(CONFIG_SMP)
/* in some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
* by the processes running on the same package. One thing we can do
@@ -490,7 +490,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp = (sp - len) & ~7UL;
exec_params->map_addr = sp;
- if (copy_to_user((void *) sp, exec_params->loadmap, len) != 0)
+ if (copy_to_user((void __user *) sp, exec_params->loadmap, len) != 0)
return -EFAULT;
current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;
@@ -501,7 +501,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp = (sp - len) & ~7UL;
interp_params->map_addr = sp;
- if (copy_to_user((void *) sp, interp_params->loadmap, len) != 0)
+ if (copy_to_user((void __user *) sp, interp_params->loadmap, len) != 0)
return -EFAULT;
current->mm->context.interp_fdpic_loadmap = (unsigned long) sp;
@@ -527,7 +527,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* put the ELF interpreter info on the stack */
#define NEW_AUX_ENT(nr, id, val) \
do { \
- struct { unsigned long _id, _val; } *ent = (void *) csp; \
+ struct { unsigned long _id, _val; } __user *ent = (void __user *) csp; \
__put_user((id), &ent[nr]._id); \
__put_user((val), &ent[nr]._val); \
} while (0)
@@ -564,13 +564,13 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* allocate room for argv[] and envv[] */
csp -= (bprm->envc + 1) * sizeof(elf_caddr_t);
- envp = (elf_caddr_t *) csp;
+ envp = (elf_caddr_t __user *) csp;
csp -= (bprm->argc + 1) * sizeof(elf_caddr_t);
- argv = (elf_caddr_t *) csp;
+ argv = (elf_caddr_t __user *) csp;
/* stack argc */
csp -= sizeof(unsigned long);
- __put_user(bprm->argc, (unsigned long *) csp);
+ __put_user(bprm->argc, (unsigned long __user *) csp);
BUG_ON(csp != sp);
@@ -581,7 +581,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
current->mm->arg_start = current->mm->start_stack - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p);
#endif
- p = (char *) current->mm->arg_start;
+ p = (char __user *) current->mm->arg_start;
for (loop = bprm->argc; loop > 0; loop--) {
__put_user((elf_caddr_t) p, argv++);
len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
@@ -1025,7 +1025,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
/* clear the bit between beginning of mapping and beginning of PT_LOAD */
if (prot & PROT_WRITE && disp > 0) {
kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
- clear_user((void *) maddr, disp);
+ clear_user((void __user *) maddr, disp);
maddr += disp;
}
@@ -1059,7 +1059,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
if (prot & PROT_WRITE && excess1 > 0) {
kdebug("clear[%d] ad=%lx sz=%lx",
loop, maddr + phdr->p_filesz, excess1);
- clear_user((void *) maddr + phdr->p_filesz, excess1);
+ clear_user((void __user *) maddr + phdr->p_filesz, excess1);
}
#else
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 599f36fd0f67..07a4996cca3f 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -739,10 +739,10 @@ static int bm_fill_super(struct super_block * sb, void * data, int silent)
return err;
}
-static struct super_block *bm_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bm_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, bm_fill_super);
+ return get_sb_single(fs_type, flags, data, bm_fill_super, mnt);
}
static struct linux_binfmt misc_format = {
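
The bm_get_sb() hunk above is the first instance of the ->get_sb() conversion that repeats through the rest of this diff: the method now returns 0 or -errno and hands the superblock back through a struct vfsmount * instead of returning a struct super_block * (or ERR_PTR), and the get_sb_single()/get_sb_bdev()/get_sb_nodev()/get_sb_pseudo() helpers grow a matching mnt argument. Filesystems that set up the superblock by hand attach it with simple_set_mnt(), as the cifs hunk further down shows. A minimal before/after sketch, assuming the 2.6.18-era helper prototypes; "foo" is an invented filesystem, not one touched by this patch:

	/* Old shape: return the superblock (or an ERR_PTR) directly. */
	static struct super_block *foo_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
	{
		return get_sb_single(fs_type, flags, data, foo_fill_super);
	}

	/* New shape: return 0/-errno and attach the result to the vfsmount. */
	static int foo_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
	{
		return get_sb_single(fs_type, flags, data, foo_fill_super, mnt);
	}
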
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 44aaba202f78..028d9fb9c2d5 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -300,10 +300,10 @@ static struct super_operations bdev_sops = {
.clear_inode = bdev_clear_inode,
};
-static struct super_block *bd_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bd_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
+ return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}
static struct file_system_type bd_type = {
diff --git a/fs/buffer.c b/fs/buffer.c
index 23f1f3a68077..373bb6292bdc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -331,7 +331,6 @@ long do_fsync(struct file *file, int datasync)
goto out;
}
- current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
/*
@@ -346,7 +345,6 @@ long do_fsync(struct file *file, int datasync)
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
- current->flags &= ~PF_SYNCWRITE;
out:
return ret;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c262d8874ce9..7520f4687158 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -166,8 +166,9 @@ cifs_put_super(struct super_block *sb)
}
static int
-cifs_statfs(struct super_block *sb, struct kstatfs *buf)
+cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
int xid;
int rc = -EOPNOTSUPP;
struct cifs_sb_info *cifs_sb;
@@ -460,9 +461,9 @@ struct super_operations cifs_super_ops = {
.remount_fs = cifs_remount,
};
-static struct super_block *
+static int
cifs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
int rc;
struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
@@ -470,7 +471,7 @@ cifs_get_sb(struct file_system_type *fs_type,
cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
if (IS_ERR(sb))
- return sb;
+ return PTR_ERR(sb);
sb->s_flags = flags;
@@ -478,10 +479,10 @@ cifs_get_sb(struct file_system_type *fs_type,
if (rc) {
up_write(&sb->s_umount);
deactivate_super(sb);
- return ERR_PTR(rc);
+ return rc;
}
sb->s_flags |= MS_ACTIVE;
- return sb;
+ return simple_set_mnt(mnt, sb);
}
static ssize_t cifs_file_writev(struct file *file, const struct iovec *iov,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c98755dca868..d56c0577c710 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -74,7 +74,7 @@ extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
size_t write_size, loff_t * poffset);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, struct dentry *, int);
-extern int cifs_flush(struct file *);
+extern int cifs_flush(struct file *, fl_owner_t id);
extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
extern const struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e2b4ce1dad66..b4a18c1cab0a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1079,9 +1079,9 @@ static int cifs_writepages(struct address_space *mapping,
unsigned int bytes_written;
struct cifs_sb_info *cifs_sb;
int done = 0;
- pgoff_t end = -1;
+ pgoff_t end;
pgoff_t index;
- int is_range = 0;
+ int range_whole = 0;
struct kvec iov[32];
int len;
int n_iov = 0;
@@ -1122,16 +1122,14 @@ static int cifs_writepages(struct address_space *mapping,
xid = GetXid();
pagevec_init(&pvec, 0);
- if (wbc->sync_mode == WB_SYNC_NONE)
+ if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
- else {
- index = 0;
- scanned = 1;
- }
- if (wbc->start || wbc->end) {
- index = wbc->start >> PAGE_CACHE_SHIFT;
- end = wbc->end >> PAGE_CACHE_SHIFT;
- is_range = 1;
+ end = -1;
+ } else {
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
scanned = 1;
}
retry:
@@ -1167,7 +1165,7 @@ retry:
break;
}
- if (unlikely(is_range) && (page->index > end)) {
+ if (!wbc->range_cyclic && page->index > end) {
done = 1;
unlock_page(page);
break;
@@ -1271,7 +1269,7 @@ retry:
index = 0;
goto retry;
}
- if (!is_range)
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
FreeXid(xid);
@@ -1419,7 +1417,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
* As file closes, flush all cached write data for this inode checking
* for write behind errors.
*/
-int cifs_flush(struct file *file)
+int cifs_flush(struct file *file, fl_owner_t id)
{
struct inode * inode = file->f_dentry->d_inode;
int rc = 0;
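
The cifs_writepages() hunks above drop the local is_range/end bookkeeping in favour of the writeback_control range fields used by the generic writeback code. A sketch of the index/end selection the function is converted to, assuming the 2.6.18-era range_cyclic/range_start/range_end fields; the helper name is invented for illustration:

	static void wb_pick_range(struct address_space *mapping,
				  struct writeback_control *wbc,
				  pgoff_t *index, pgoff_t *end, int *range_whole)
	{
		if (wbc->range_cyclic) {
			*index = mapping->writeback_index; /* resume previous pass */
			*end = -1;			   /* no upper bound */
		} else {
			*index = wbc->range_start >> PAGE_CACHE_SHIFT;
			*end = wbc->range_end >> PAGE_CACHE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				*range_whole = 1;
		}
	}

As the later hunk shows, mapping->writeback_index is only updated afterwards for cyclic writeback, or when the whole file was requested and wbc->nr_to_write is still positive.
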
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 7c2642431fa5..cc66c681bd11 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -164,7 +164,7 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
return 0;
}
-int coda_flush(struct file *coda_file)
+int coda_flush(struct file *coda_file, fl_owner_t id)
{
unsigned short flags = coda_file->f_flags & ~O_EXCL;
unsigned short coda_flags = coda_flags_to_cflags(flags);
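
cifs_flush() and coda_flush() above, and fuse_flush() further down, all pick up the same prototype change: the ->flush() file operation now receives an fl_owner_t identifying the owner doing the close, presumably so a filesystem can release only that owner's POSIX locks when filp_close() runs. A sketch of the new shape; foo_flush is an invented name:

	/* New ->flush() signature assumed by these hunks. */
	static int foo_flush(struct file *file, fl_owner_t id)
	{
		/* 'id' is the lock owner passed down from filp_close();
		 * write back cached data and return 0 or -errno as before. */
		return 0;
	}
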
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index ada1a81df6bd..87f1dc8aa24b 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -36,7 +36,7 @@
/* VFS super_block ops */
static void coda_clear_inode(struct inode *);
static void coda_put_super(struct super_block *);
-static int coda_statfs(struct super_block *sb, struct kstatfs *buf);
+static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
static kmem_cache_t * coda_inode_cachep;
@@ -278,13 +278,13 @@ struct inode_operations coda_file_inode_operations = {
.setattr = coda_setattr,
};
-static int coda_statfs(struct super_block *sb, struct kstatfs *buf)
+static int coda_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int error;
lock_kernel();
- error = venus_statfs(sb, buf);
+ error = venus_statfs(dentry, buf);
unlock_kernel();
@@ -307,10 +307,10 @@ static int coda_statfs(struct super_block *sb, struct kstatfs *buf)
/* init_coda: used by filesystems.c to register coda */
-static struct super_block *coda_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int coda_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, coda_fill_super);
+ return get_sb_nodev(fs_type, flags, data, coda_fill_super, mnt);
}
struct file_system_type coda_fs_type = {
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 1bae99650a91..b040eba13a7d 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -611,7 +611,7 @@ int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
return error;
}
-int venus_statfs(struct super_block *sb, struct kstatfs *sfs)
+int venus_statfs(struct dentry *dentry, struct kstatfs *sfs)
{
union inputArgs *inp;
union outputArgs *outp;
@@ -620,7 +620,7 @@ int venus_statfs(struct super_block *sb, struct kstatfs *sfs)
insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
UPARG(CODA_STATFS);
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_sbp(dentry->d_sb), insize, &outsize, inp);
if (!error) {
sfs->f_blocks = outp->coda_statfs.stat.f_blocks;
diff --git a/fs/compat.c b/fs/compat.c
index b1f64786a613..7e7e5bc4f3cf 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -197,7 +197,7 @@ asmlinkage long compat_sys_statfs(const char __user *path, struct compat_statfs
error = user_path_walk(path, &nd);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(nd.dentry, &tmp);
if (!error)
error = put_compat_statfs(buf, &tmp);
path_release(&nd);
@@ -215,7 +215,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(file->f_dentry, &tmp);
if (!error)
error = put_compat_statfs(buf, &tmp);
fput(file);
@@ -265,7 +265,7 @@ asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, s
error = user_path_walk(path, &nd);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(nd.dentry, &tmp);
if (!error)
error = put_compat_statfs64(buf, &tmp);
path_release(&nd);
@@ -286,7 +286,7 @@ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct c
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(file->f_dentry, &tmp);
if (!error)
error = put_compat_statfs64(buf, &tmp);
fput(file);
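
The fs/compat.c hunks show the caller side of the statfs prototype change running through this series: vfs_statfs() and the ->statfs() super operation now take a dentry rather than a super_block, so the statistics can in principle be computed relative to a point in the tree. Converted filesystems simply recover the superblock via dentry->d_sb, as cifs_statfs() and coda_statfs() above and the cramfs/efs/ext2/ext3/fat/hfs hunks below do. Minimal sketch with an invented foo filesystem:

	static int foo_statfs(struct dentry *dentry, struct kstatfs *buf)
	{
		struct super_block *sb = dentry->d_sb;	/* the old argument */

		buf->f_type = sb->s_magic;
		buf->f_bsize = sb->s_blocksize;
		/* fill in the remaining kstatfs fields as before */
		return 0;
	}
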
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index f920d30478e5..94dab7bdd851 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -103,10 +103,10 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *configfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int configfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, configfs_fill_super);
+ return get_sb_single(fs_type, flags, data, configfs_fill_super, mnt);
}
static struct file_system_type configfs_fs_type = {
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 9efcc3a164e8..c45d73860803 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -181,9 +181,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
struct page *page = NULL;
if (blocknr + i < devsize) {
- page = read_cache_page(mapping, blocknr + i,
- (filler_t *)mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(mapping, blocknr + i, NULL);
/* synchronous error? */
if (IS_ERR(page))
page = NULL;
@@ -322,8 +320,10 @@ out:
return -EINVAL;
}
-static int cramfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = CRAMFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_blocks = CRAMFS_SB(sb)->blocks;
@@ -528,10 +528,11 @@ static struct super_operations cramfs_ops = {
.statfs = cramfs_statfs,
};
-static struct super_block *cramfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int cramfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super,
+ mnt);
}
static struct file_system_type cramfs_fs_type = {
diff --git a/fs/dcache.c b/fs/dcache.c
index 59dbc92c2079..313b54b2b8f2 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -687,46 +687,6 @@ void shrink_dcache_parent(struct dentry * parent)
prune_dcache(found, parent->d_sb);
}
-/**
- * shrink_dcache_anon - further prune the cache
- * @head: head of d_hash list of dentries to prune
- *
- * Prune the dentries that are anonymous
- *
- * parsing d_hash list does not hlist_for_each_entry_rcu() as it
- * done under dcache_lock.
- *
- */
-void shrink_dcache_anon(struct super_block *sb)
-{
- struct hlist_node *lp;
- struct hlist_head *head = &sb->s_anon;
- int found;
- do {
- found = 0;
- spin_lock(&dcache_lock);
- hlist_for_each(lp, head) {
- struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
- if (!list_empty(&this->d_lru)) {
- dentry_stat.nr_unused--;
- list_del_init(&this->d_lru);
- }
-
- /*
- * move only zero ref count dentries to the end
- * of the unused list for prune_dcache
- */
- if (!atomic_read(&this->d_count)) {
- list_add_tail(&this->d_lru, &dentry_unused);
- dentry_stat.nr_unused++;
- found++;
- }
- }
- spin_unlock(&dcache_lock);
- prune_dcache(found, sb);
- } while(found);
-}
-
/*
* Scan `nr' dentries and return the number which remain.
*
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index b55b4ea9a676..440128ebef3b 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -111,11 +111,11 @@ static int debug_fill_super(struct super_block *sb, void *data, int silent)
return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
}
-static struct super_block *debug_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int debug_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, debug_fill_super);
+ return get_sb_single(fs_type, flags, data, debug_fill_super, mnt);
}
static struct file_system_type debug_fs_type = {
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index 52f5059c4f31..51a97f132745 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -2549,11 +2549,11 @@ static int devfs_fill_super(struct super_block *sb, void *data, int silent)
return -EINVAL;
} /* End Function devfs_fill_super */
-static struct super_block *devfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int devfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, devfs_fill_super);
+ return get_sb_single(fs_type, flags, data, devfs_fill_super, mnt);
}
static struct file_system_type devfs_fs_type = {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 14c5620b5cab..f7aef5bb584a 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -130,10 +130,10 @@ fail:
return -ENOMEM;
}
-static struct super_block *devpts_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int devpts_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, devpts_fill_super);
+ return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt);
}
static struct file_system_type devpts_fs_type = {
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b05d1b218776..538fb0418fba 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -162,7 +162,7 @@ static int dio_refill_pages(struct dio *dio)
NULL); /* vmas */
up_read(&current->mm->mmap_sem);
- if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) {
+ if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
struct page *page = ZERO_PAGE(dio->curr_user_address);
/*
* A memory fault, but the filesystem has some outstanding
@@ -535,7 +535,7 @@ static int get_more_blocks(struct dio *dio)
map_bh->b_state = 0;
map_bh->b_size = fs_count << dio->inode->i_blkbits;
- create = dio->rw == WRITE;
+ create = dio->rw & WRITE;
if (dio->lock_type == DIO_LOCKING) {
if (dio->block_in_file < (i_size_read(dio->inode) >>
dio->blkbits))
@@ -867,7 +867,7 @@ do_holes:
loff_t i_size_aligned;
/* AKPM: eargh, -ENOTBLK is a hack */
- if (dio->rw == WRITE) {
+ if (dio->rw & WRITE) {
page_cache_release(page);
return -ENOTBLK;
}
@@ -1045,7 +1045,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
}
} /* end iovec loop */
- if (ret == -ENOTBLK && rw == WRITE) {
+ if (ret == -ENOTBLK && (rw & WRITE)) {
/*
* The remaining part of the request will be
* be handled by buffered I/O when we return
@@ -1089,7 +1089,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
if (dio->is_async) {
int should_wait = 0;
- if (dio->result < dio->size && rw == WRITE) {
+ if (dio->result < dio->size && (rw & WRITE)) {
dio->waiter = current;
should_wait = 1;
}
@@ -1142,7 +1142,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
ret = transferred;
/* We could have also come here on an AIO file extend */
- if (!is_sync_kiocb(iocb) && rw == WRITE &&
+ if (!is_sync_kiocb(iocb) && (rw & WRITE) &&
ret >= 0 && dio->result == dio->size)
/*
* For AIO writes where we have completed the
@@ -1194,7 +1194,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
int acquire_i_mutex = 0;
if (rw & WRITE)
- current->flags |= PF_SYNCWRITE;
+ rw = WRITE_SYNC;
if (bdev)
bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
@@ -1270,7 +1270,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
* even for AIO, we need to wait for i/o to complete before
* returning in this case.
*/
- dio->is_async = !is_sync_kiocb(iocb) && !((rw == WRITE) &&
+ dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
(end > i_size_read(inode)));
retval = direct_io_worker(rw, iocb, inode, iov, offset,
@@ -1284,8 +1284,6 @@ out:
mutex_unlock(&inode->i_mutex);
else if (acquire_i_mutex)
mutex_lock(&inode->i_mutex);
- if (rw & WRITE)
- current->flags &= ~PF_SYNCWRITE;
return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);
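
The direct-io.c hunks change every write-direction test from rw == WRITE to rw & WRITE and drop the PF_SYNCWRITE task flag, instead promoting the request itself to WRITE_SYNC up front. The mask matters because rw can now carry a sync hint alongside the READ/WRITE direction, so an equality test would no longer recognise a synchronous write. Illustrative helper only, assuming WRITE_SYNC is WRITE plus a sync bit as in this era's fs.h:

	static inline int dio_rw_is_write(int rw)
	{
		return (rw & WRITE) != 0;	/* true for WRITE and WRITE_SYNC */
	}
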
diff --git a/fs/efs/super.c b/fs/efs/super.c
index dff623e3ddbf..8ac2462ae5dd 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -15,13 +15,13 @@
#include <linux/buffer_head.h>
#include <linux/vfs.h>
-static int efs_statfs(struct super_block *s, struct kstatfs *buf);
+static int efs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int efs_fill_super(struct super_block *s, void *d, int silent);
-static struct super_block *efs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int efs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, efs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, efs_fill_super, mnt);
}
static struct file_system_type efs_fs_type = {
@@ -322,8 +322,8 @@ out_no_fs:
return -EINVAL;
}
-static int efs_statfs(struct super_block *s, struct kstatfs *buf) {
- struct efs_sb_info *sb = SUPER_INFO(s);
+static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) {
+ struct efs_sb_info *sb = SUPER_INFO(dentry->d_sb);
buf->f_type = EFS_SUPER_MAGIC; /* efs magic number */
buf->f_bsize = EFS_BLOCKSIZE; /* blocksize */
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 2695337d4d64..08e7e6a555ca 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -268,9 +268,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout);
static int eventpollfs_delete_dentry(struct dentry *dentry);
static struct inode *ep_eventpoll_inode(void);
-static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data);
+static int eventpollfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt);
/*
* This semaphore is used to serialize ep_free() and eventpoll_release_file().
@@ -1595,11 +1595,12 @@ eexit_1:
}
-static struct super_block *
+static int
eventpollfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+ const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC);
+ return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC,
+ mnt);
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d672aa9f4061..3c1c9aaaca6b 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -159,8 +159,7 @@ fail:
static struct page * ext2_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
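
ext2_get_page() above, like the cramfs, freevxfs, hfs and hfsplus hunks elsewhere in this diff, replaces the open-coded read_cache_page(mapping, n, (filler_t *)mapping->a_ops->readpage, NULL) call with read_mapping_page(). The helper is presumably just a thin wrapper that supplies the mapping's own ->readpage as the filler, roughly:

	/* Rough shape of the helper these hunks switch to; see
	 * include/linux/pagemap.h of this era for the real definition. */
	static inline struct page *read_mapping_page(struct address_space *mapping,
						     unsigned long index,
						     struct file *file)
	{
		filler_t *filler = (filler_t *)mapping->a_ops->readpage;

		return read_cache_page(mapping, index, filler, file);
	}
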
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7e30bae174ed..ee4ba759581e 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -39,7 +39,7 @@
static void ext2_sync_super(struct super_block *sb,
struct ext2_super_block *es);
static int ext2_remount (struct super_block * sb, int * flags, char * data);
-static int ext2_statfs (struct super_block * sb, struct kstatfs * buf);
+static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
void ext2_error (struct super_block * sb, const char * function,
const char * fmt, ...)
@@ -834,9 +834,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
printk ("EXT2-fs: not enough memory\n");
goto failed_mount;
}
- percpu_counter_init(&sbi->s_freeblocks_counter);
- percpu_counter_init(&sbi->s_freeinodes_counter);
- percpu_counter_init(&sbi->s_dirs_counter);
bgl_lock_init(&sbi->s_blockgroup_lock);
sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
GFP_KERNEL);
@@ -863,6 +860,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
+
+ percpu_counter_init(&sbi->s_freeblocks_counter,
+ ext2_count_free_blocks(sb));
+ percpu_counter_init(&sbi->s_freeinodes_counter,
+ ext2_count_free_inodes(sb));
+ percpu_counter_init(&sbi->s_dirs_counter,
+ ext2_count_dirs(sb));
/*
* set up enough so that it can read an inode
*/
@@ -874,24 +878,18 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root) {
iput(root);
printk(KERN_ERR "EXT2-fs: get root inode failed\n");
- goto failed_mount2;
+ goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
dput(sb->s_root);
sb->s_root = NULL;
printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
- goto failed_mount2;
+ goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_warning(sb, __FUNCTION__,
"mounting ext3 filesystem as ext2");
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
- percpu_counter_mod(&sbi->s_freeblocks_counter,
- ext2_count_free_blocks(sb));
- percpu_counter_mod(&sbi->s_freeinodes_counter,
- ext2_count_free_inodes(sb));
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext2_count_dirs(sb));
return 0;
cantfind_ext2:
@@ -899,7 +897,10 @@ cantfind_ext2:
printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
sb->s_id);
goto failed_mount;
-
+failed_mount3:
+ percpu_counter_destroy(&sbi->s_freeblocks_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -1038,8 +1039,9 @@ restore_opts:
return err;
}
-static int ext2_statfs (struct super_block * sb, struct kstatfs * buf)
+static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
{
+ struct super_block *sb = dentry->d_sb;
struct ext2_sb_info *sbi = EXT2_SB(sb);
unsigned long overhead;
int i;
@@ -1087,10 +1089,10 @@ static int ext2_statfs (struct super_block * sb, struct kstatfs * buf)
return 0;
}
-static struct super_block *ext2_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ext2_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super, mnt);
}
#ifdef CONFIG_QUOTA
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f8a5266ea1ff..a60cc6ec130f 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -58,7 +58,7 @@ static int ext3_sync_fs(struct super_block *sb, int wait);
static const char *ext3_decode_error(struct super_block * sb, int errno,
char nbuf[16]);
static int ext3_remount (struct super_block * sb, int * flags, char * data);
-static int ext3_statfs (struct super_block * sb, struct kstatfs * buf);
+static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf);
static void ext3_unlockfs(struct super_block *sb);
static void ext3_write_super (struct super_block * sb);
static void ext3_write_super_lockfs(struct super_block *sb);
@@ -499,20 +499,21 @@ static void ext3_clear_inode(struct inode *inode)
{
struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
#ifdef CONFIG_EXT3_FS_POSIX_ACL
- if (EXT3_I(inode)->i_acl &&
- EXT3_I(inode)->i_acl != EXT3_ACL_NOT_CACHED) {
- posix_acl_release(EXT3_I(inode)->i_acl);
- EXT3_I(inode)->i_acl = EXT3_ACL_NOT_CACHED;
- }
- if (EXT3_I(inode)->i_default_acl &&
- EXT3_I(inode)->i_default_acl != EXT3_ACL_NOT_CACHED) {
- posix_acl_release(EXT3_I(inode)->i_default_acl);
- EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
- }
+ if (EXT3_I(inode)->i_acl &&
+ EXT3_I(inode)->i_acl != EXT3_ACL_NOT_CACHED) {
+ posix_acl_release(EXT3_I(inode)->i_acl);
+ EXT3_I(inode)->i_acl = EXT3_ACL_NOT_CACHED;
+ }
+ if (EXT3_I(inode)->i_default_acl &&
+ EXT3_I(inode)->i_default_acl != EXT3_ACL_NOT_CACHED) {
+ posix_acl_release(EXT3_I(inode)->i_default_acl);
+ EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
+ }
#endif
ext3_discard_reservation(inode);
EXT3_I(inode)->i_block_alloc_info = NULL;
- kfree(rsv);
+ if (unlikely(rsv))
+ kfree(rsv);
}
static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
@@ -1579,9 +1580,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount;
}
- percpu_counter_init(&sbi->s_freeblocks_counter);
- percpu_counter_init(&sbi->s_freeinodes_counter);
- percpu_counter_init(&sbi->s_dirs_counter);
bgl_lock_init(&sbi->s_blockgroup_lock);
for (i = 0; i < db_count; i++) {
@@ -1601,6 +1599,14 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
+
+ percpu_counter_init(&sbi->s_freeblocks_counter,
+ ext3_count_free_blocks(sb));
+ percpu_counter_init(&sbi->s_freeinodes_counter,
+ ext3_count_free_inodes(sb));
+ percpu_counter_init(&sbi->s_dirs_counter,
+ ext3_count_dirs(sb));
+
/* per fileystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
@@ -1639,16 +1645,16 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (!test_opt(sb, NOLOAD) &&
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext3_load_journal(sb, es, journal_devnum))
- goto failed_mount2;
+ goto failed_mount3;
} else if (journal_inum) {
if (ext3_create_journal(sb, es, journal_inum))
- goto failed_mount2;
+ goto failed_mount3;
} else {
if (!silent)
printk (KERN_ERR
"ext3: No journal on filesystem on %s\n",
sb->s_id);
- goto failed_mount2;
+ goto failed_mount3;
}
/* We have now updated the journal if required, so we can
@@ -1671,7 +1677,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
(sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
printk(KERN_ERR "EXT3-fs: Journal does not support "
"requested data journaling mode\n");
- goto failed_mount3;
+ goto failed_mount4;
}
default:
break;
@@ -1694,13 +1700,13 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (!sb->s_root) {
printk(KERN_ERR "EXT3-fs: get root inode failed\n");
iput(root);
- goto failed_mount3;
+ goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
dput(sb->s_root);
sb->s_root = NULL;
printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n");
- goto failed_mount3;
+ goto failed_mount4;
}
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -1723,13 +1729,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
"writeback");
- percpu_counter_mod(&sbi->s_freeblocks_counter,
- ext3_count_free_blocks(sb));
- percpu_counter_mod(&sbi->s_freeinodes_counter,
- ext3_count_free_inodes(sb));
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
-
lock_kernel();
return 0;
@@ -1739,8 +1738,12 @@ cantfind_ext3:
sb->s_id);
goto failed_mount;
-failed_mount3:
+failed_mount4:
journal_destroy(sbi->s_journal);
+failed_mount3:
+ percpu_counter_destroy(&sbi->s_freeblocks_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -2318,8 +2321,9 @@ restore_opts:
return err;
}
-static int ext3_statfs (struct super_block * sb, struct kstatfs * buf)
+static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
{
+ struct super_block *sb = dentry->d_sb;
struct ext3_sb_info *sbi = EXT3_SB(sb);
struct ext3_super_block *es = sbi->s_es;
unsigned long overhead;
@@ -2646,10 +2650,10 @@ out:
#endif
-static struct super_block *ext3_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ext3_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ext3_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ext3_fill_super, mnt);
}
static struct file_system_type ext3_fs_type = {
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index c1ce284f8a94..7c35d582ec10 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -539,18 +539,18 @@ static int fat_remount(struct super_block *sb, int *flags, char *data)
return 0;
}
-static int fat_statfs(struct super_block *sb, struct kstatfs *buf)
+static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
/* If the count of free cluster is still unknown, counts it here. */
if (sbi->free_clusters == -1) {
- int err = fat_count_free_clusters(sb);
+ int err = fat_count_free_clusters(dentry->d_sb);
if (err)
return err;
}
- buf->f_type = sb->s_magic;
+ buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = sbi->cluster_size;
buf->f_blocks = sbi->max_cluster - FAT_START_ENT;
buf->f_bfree = sbi->free_clusters;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 944652e9dde1..308f2b6b5026 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -210,4 +210,3 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
return err;
}
-EXPORT_SYMBOL_GPL(fat_sync_bhs);
diff --git a/fs/file_table.c b/fs/file_table.c
index bcea1998b4de..506d5307108d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -300,5 +300,5 @@ void __init files_init(unsigned long mempages)
if (files_stat.max_files < NR_FILE)
files_stat.max_files = NR_FILE;
files_defer_init();
- percpu_counter_init(&nr_files);
+ percpu_counter_init(&nr_files, 0);
}
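
files_init() above, together with the earlier ext2/ext3 fill_super hunks, tracks the percpu_counter API change in this series: percpu_counter_init() now takes the initial value, so the percpu_counter_mod() calls that used to seed s_freeblocks_counter and friends afterwards are dropped, and the new failed_mount3/failed_mount4 labels destroy the counters on the error path. Usage sketch; nr_foo and count_foo() are hypothetical:

	static struct percpu_counter nr_foo;

	static void foo_counter_setup(void)
	{
		/* Old API: percpu_counter_init(&nr_foo);
		 *          percpu_counter_mod(&nr_foo, count_foo());   */

		/* New API: seed at init time... */
		percpu_counter_init(&nr_foo, count_foo());
		/* ...and pair it with a destroy on the teardown path. */
		percpu_counter_destroy(&nr_foo);
	}
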
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 50aae77651b2..c1be118fc067 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -71,8 +71,7 @@ vxfs_get_page(struct address_space *mapping, u_long n)
{
struct page * pp;
- pp = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ pp = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(pp)) {
wait_on_page_locked(pp);
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index b44c916d24a1..b74b791fc23b 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/vfs.h>
+#include <linux/mount.h>
#include "vxfs.h"
#include "vxfs_extern.h"
@@ -55,7 +56,7 @@ MODULE_ALIAS("vxfs"); /* makes mount -t vxfs autoload the module */
static void vxfs_put_super(struct super_block *);
-static int vxfs_statfs(struct super_block *, struct kstatfs *);
+static int vxfs_statfs(struct dentry *, struct kstatfs *);
static int vxfs_remount(struct super_block *, int *, char *);
static struct super_operations vxfs_super_ops = {
@@ -90,12 +91,12 @@ vxfs_put_super(struct super_block *sbp)
/**
* vxfs_statfs - get filesystem information
- * @sbp: VFS superblock
+ * @dentry: VFS dentry to locate superblock
* @bufp: output buffer
*
* Description:
* vxfs_statfs fills the statfs buffer @bufp with information
- * about the filesystem described by @sbp.
+ * about the filesystem described by @dentry.
*
* Returns:
* Zero.
@@ -107,12 +108,12 @@ vxfs_put_super(struct super_block *sbp)
* This is everything but complete...
*/
static int
-vxfs_statfs(struct super_block *sbp, struct kstatfs *bufp)
+vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
{
- struct vxfs_sb_info *infp = VXFS_SBI(sbp);
+ struct vxfs_sb_info *infp = VXFS_SBI(dentry->d_sb);
bufp->f_type = VXFS_SUPER_MAGIC;
- bufp->f_bsize = sbp->s_blocksize;
+ bufp->f_bsize = dentry->d_sb->s_blocksize;
bufp->f_blocks = infp->vsi_raw->vs_dsize;
bufp->f_bfree = infp->vsi_raw->vs_free;
bufp->f_bavail = 0;
@@ -241,10 +242,11 @@ out:
/*
* The usual module blurb.
*/
-static struct super_block *vxfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int vxfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, vxfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, vxfs_fill_super,
+ mnt);
}
static struct file_system_type vxfs_fs_type = {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f3fbe2d030f4..031b27a4bc9a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -461,6 +461,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
{
struct writeback_control wbc = {
.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
};
unsigned long nr_dirty = read_page_state(nr_dirty);
unsigned long nr_unstable = read_page_state(nr_unstable);
@@ -559,6 +561,8 @@ int write_inode_now(struct inode *inode, int sync)
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = WB_SYNC_ALL,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
};
if (!mapping_cap_writeback_dirty(inode->i_mapping))
@@ -619,7 +623,6 @@ int generic_osync_inode(struct inode *inode, struct address_space *mapping, int
int need_write_inode_now = 0;
int err2;
- current->flags |= PF_SYNCWRITE;
if (what & OSYNC_DATA)
err = filemap_fdatawrite(mapping);
if (what & (OSYNC_METADATA|OSYNC_DATA)) {
@@ -632,7 +635,6 @@ int generic_osync_inode(struct inode *inode, struct address_space *mapping, int
if (!err)
err = err2;
}
- current->flags &= ~PF_SYNCWRITE;
spin_lock(&inode_lock);
if ((inode->i_state & I_DIRTY) &&
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index fc342cf7c2cc..087f3b734f40 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -169,7 +169,7 @@ static int fuse_release(struct inode *inode, struct file *file)
return fuse_release_common(inode, file, 0);
}
-static int fuse_flush(struct file *file)
+static int fuse_flush(struct file *file, fl_owner_t id)
{
struct inode *inode = file->f_dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 7627022446b2..a13c0f529058 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -236,8 +236,9 @@ static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr
/* fsid is left zero */
}
-static int fuse_statfs(struct super_block *sb, struct kstatfs *buf)
+static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct fuse_conn *fc = get_fuse_conn_super(sb);
struct fuse_req *req;
struct fuse_statfs_out outarg;
@@ -569,11 +570,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
return err;
}
-static struct super_block *fuse_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *raw_data)
+static int fuse_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *raw_data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super);
+ return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}
static struct file_system_type fuse_fs_type = {
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 1e44dcfe49c4..13231dd5ce66 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -280,7 +280,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
block = off >> PAGE_CACHE_SHIFT;
node->page_offset = off & ~PAGE_CACHE_MASK;
for (i = 0; i < tree->pages_per_bnode; i++) {
- page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, block++, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index d20131ce4b95..400357994319 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -59,7 +59,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
unlock_new_inode(tree->inode);
mapping = tree->inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto free_tree;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 1181d116117d..d9227bf14e86 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -80,8 +80,10 @@ static void hfs_put_super(struct super_block *sb)
*
* changed f_files/f_ffree to reflect the fs_ablock/free_ablocks.
*/
-static int hfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = HFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (u32)HFS_SB(sb)->fs_ablocks * HFS_SB(sb)->fs_div;
@@ -413,10 +415,11 @@ bail:
return res;
}
-static struct super_block *hfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, hfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, hfs_fill_super, mnt);
}
static struct file_system_type hfs_fs_type = {
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index 9fb51632303c..d128a25b74d2 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -31,8 +31,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
@@ -72,8 +71,8 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+ NULL);
curr = pptr = kmap(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
@@ -119,8 +118,8 @@ found:
set_page_dirty(page);
kunmap(page);
offset += PAGE_CACHE_BITS;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+ NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
@@ -167,7 +166,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
- page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, pnr, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
end = pptr + PAGE_CACHE_BITS / 32;
@@ -199,7 +198,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
break;
set_page_dirty(page);
kunmap(page);
- page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, ++pnr, NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 746abc9ecf70..77bf434da679 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -440,7 +440,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
block = off >> PAGE_CACHE_SHIFT;
node->page_offset = off & ~PAGE_CACHE_MASK;
for (i = 0; i < tree->pages_per_bnode; block++, i++) {
- page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, block, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index effa8991999c..cfc852fdd1b5 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -38,7 +38,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
goto free_tree;
mapping = tree->inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto free_tree;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 7843f792a4b7..0a92fa2336a2 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -212,8 +212,10 @@ static void hfsplus_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
-static int hfsplus_statfs(struct super_block *sb, struct kstatfs *buf)
+static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = HFSPLUS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = HFSPLUS_SB(sb).total_blocks << HFSPLUS_SB(sb).fs_shift;
@@ -450,10 +452,12 @@ static void hfsplus_destroy_inode(struct inode *inode)
#define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
-static struct super_block *hfsplus_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hfsplus_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super,
+ mnt);
}
static struct file_system_type hfsplus_fs_type = {
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index bf0f8e16e433..8e0d37743e7c 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -239,7 +239,7 @@ static int read_inode(struct inode *ino)
return(err);
}
-int hostfs_statfs(struct super_block *sb, struct kstatfs *sf)
+int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
{
/* do_statfs uses struct statfs64 internally, but the linux kernel
* struct statfs still has 32-bit versions for most of these fields,
@@ -252,7 +252,7 @@ int hostfs_statfs(struct super_block *sb, struct kstatfs *sf)
long long f_files;
long long f_ffree;
- err = do_statfs(HOSTFS_I(sb->s_root->d_inode)->host_filename,
+ err = do_statfs(HOSTFS_I(dentry->d_sb->s_root->d_inode)->host_filename,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
&sf->f_namelen, sf->f_spare);
@@ -993,11 +993,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
return(err);
}
-static struct super_block *hostfs_read_sb(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
+static int hostfs_read_sb(struct file_system_type *type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return(get_sb_nodev(type, flags, data, hostfs_fill_sb_common));
+ return get_sb_nodev(type, flags, data, hostfs_fill_sb_common, mnt);
}
static struct file_system_type hostfs_type = {
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index d72d8c87c996..f798480a363f 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -135,8 +135,9 @@ static unsigned count_bitmaps(struct super_block *s)
return count;
}
-static int hpfs_statfs(struct super_block *s, struct kstatfs *buf)
+static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *s = dentry->d_sb;
struct hpfs_sb_info *sbi = hpfs_sb(s);
lock_kernel();
@@ -662,10 +663,11 @@ bail0:
return -EINVAL;
}
-static struct super_block *hpfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hpfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, hpfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, hpfs_fill_super,
+ mnt);
}
static struct file_system_type hpfs_fs_type = {
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 5e6363be246f..3a9bdf58166f 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -616,7 +616,7 @@ static const struct file_operations hppfs_dir_fops = {
.fsync = hppfs_fsync,
};
-static int hppfs_statfs(struct super_block *sb, struct kstatfs *sf)
+static int hppfs_statfs(struct dentry *dentry, struct kstatfs *sf)
{
sf->f_blocks = 0;
sf->f_bfree = 0;
@@ -769,11 +769,11 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
return(err);
}
-static struct super_block *hppfs_read_super(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
+static int hppfs_read_super(struct file_system_type *type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return(get_sb_nodev(type, flags, data, hppfs_fill_super));
+ return get_sb_nodev(type, flags, data, hppfs_fill_super, mnt);
}
static struct file_system_type hppfs_type = {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 3a5b4e923455..e6410d8edd0e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -59,7 +59,6 @@ static void huge_pagevec_release(struct pagevec *pvec)
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file->f_dentry->d_inode;
- struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
loff_t len, vma_len;
int ret;
@@ -87,9 +86,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size)
goto out;
- if (vma->vm_flags & VM_MAYSHARE)
- if (hugetlb_extend_reservation(info, len >> HPAGE_SHIFT) != 0)
- goto out;
+ if (vma->vm_flags & VM_MAYSHARE &&
+ hugetlb_reserve_pages(inode, vma->vm_pgoff >> (HPAGE_SHIFT-PAGE_SHIFT),
+ len >> HPAGE_SHIFT))
+ goto out;
ret = 0;
hugetlb_prefault_arch_hook(vma->vm_mm);
@@ -195,12 +195,8 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
const pgoff_t start = lstart >> HPAGE_SHIFT;
struct pagevec pvec;
pgoff_t next;
- int i;
+ int i, freed = 0;
- hugetlb_truncate_reservation(HUGETLBFS_I(inode),
- lstart >> HPAGE_SHIFT);
- if (!mapping->nrpages)
- return;
pagevec_init(&pvec, 0);
next = start;
while (1) {
@@ -221,10 +217,12 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
truncate_huge_page(page);
unlock_page(page);
hugetlb_put_quota(mapping);
+ freed++;
}
huge_pagevec_release(&pvec);
}
BUG_ON(!lstart && mapping->nrpages);
+ hugetlb_unreserve_pages(inode, start, freed);
}
static void hugetlbfs_delete_inode(struct inode *inode)
@@ -366,6 +364,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
switch (mode & S_IFMT) {
@@ -467,9 +466,9 @@ static int hugetlbfs_set_page_dirty(struct page *page)
return 0;
}
-static int hugetlbfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
+ struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
buf->f_type = HUGETLBFS_MAGIC;
buf->f_bsize = HPAGE_SIZE;
@@ -538,7 +537,6 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
- p->prereserved_hpages = 0;
return &p->vfs_inode;
}
@@ -723,10 +721,10 @@ void hugetlb_put_quota(struct address_space *mapping)
}
}
-static struct super_block *hugetlbfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hugetlbfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super, mnt);
}
static struct file_system_type hugetlbfs_fs_type = {
@@ -781,8 +779,7 @@ struct file *hugetlb_zero_setup(size_t size)
goto out_file;
error = -ENOMEM;
- if (hugetlb_extend_reservation(HUGETLBFS_I(inode),
- size >> HPAGE_SHIFT) != 0)
+ if (hugetlb_reserve_pages(inode, 0, size >> HPAGE_SHIFT))
goto out_inode;
d_instantiate(dentry, inode);
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 9e9931e2badd..f2386442adee 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -672,11 +672,11 @@ out:
return ret;
}
-static struct super_block *
+static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+ const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
+ return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA, mnt);
}
static struct file_system_type inotify_fs_type = {
diff --git a/fs/ioprio.c b/fs/ioprio.c
index ca77008146c0..7fa76ed53c10 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -24,15 +24,21 @@
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
+#include <linux/security.h>
static int set_task_ioprio(struct task_struct *task, int ioprio)
{
+ int err;
struct io_context *ioc;
if (task->uid != current->euid &&
task->uid != current->uid && !capable(CAP_SYS_NICE))
return -EPERM;
+ err = security_task_setioprio(task, ioprio);
+ if (err)
+ return err;
+
task_lock(task);
task->ioprio = ioprio;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 70adbb98bad1..3f9c8ba1fa1f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -56,7 +56,7 @@ static void isofs_put_super(struct super_block *sb)
}
static void isofs_read_inode(struct inode *);
-static int isofs_statfs (struct super_block *, struct kstatfs *);
+static int isofs_statfs (struct dentry *, struct kstatfs *);
static kmem_cache_t *isofs_inode_cachep;
@@ -901,8 +901,10 @@ out_freesbi:
return -EINVAL;
}
-static int isofs_statfs (struct super_block *sb, struct kstatfs *buf)
+static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = ISOFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (ISOFS_SB(sb)->s_nzones
@@ -1399,10 +1401,11 @@ struct inode *isofs_iget(struct super_block *sb,
return inode;
}
-static struct super_block *isofs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int isofs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, isofs_fill_super,
+ mnt);
}
static struct file_system_type iso9660_fs_type = {
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 3f5102b069db..47678a26c13b 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -24,29 +24,67 @@
#include <linux/slab.h>
/*
- * Unlink a buffer from a transaction.
+ * Unlink a buffer from a transaction checkpoint list.
*
* Called with j_list_lock held.
*/
-
-static inline void __buffer_unlink(struct journal_head *jh)
+static inline void __buffer_unlink_first(struct journal_head *jh)
{
- transaction_t *transaction;
-
- transaction = jh->b_cp_transaction;
- jh->b_cp_transaction = NULL;
+ transaction_t *transaction = jh->b_cp_transaction;
jh->b_cpnext->b_cpprev = jh->b_cpprev;
jh->b_cpprev->b_cpnext = jh->b_cpnext;
- if (transaction->t_checkpoint_list == jh)
+ if (transaction->t_checkpoint_list == jh) {
transaction->t_checkpoint_list = jh->b_cpnext;
- if (transaction->t_checkpoint_list == jh)
- transaction->t_checkpoint_list = NULL;
+ if (transaction->t_checkpoint_list == jh)
+ transaction->t_checkpoint_list = NULL;
+ }
+}
+
+/*
+ * Unlink a buffer from a transaction checkpoint(io) list.
+ *
+ * Called with j_list_lock held.
+ */
+static inline void __buffer_unlink(struct journal_head *jh)
+{
+ transaction_t *transaction = jh->b_cp_transaction;
+
+ __buffer_unlink_first(jh);
+ if (transaction->t_checkpoint_io_list == jh) {
+ transaction->t_checkpoint_io_list = jh->b_cpnext;
+ if (transaction->t_checkpoint_io_list == jh)
+ transaction->t_checkpoint_io_list = NULL;
+ }
+}
+
+/*
+ * Move a buffer from the checkpoint list to the checkpoint io list
+ *
+ * Called with j_list_lock held
+ */
+static inline void __buffer_relink_io(struct journal_head *jh)
+{
+ transaction_t *transaction = jh->b_cp_transaction;
+
+ __buffer_unlink_first(jh);
+
+ if (!transaction->t_checkpoint_io_list) {
+ jh->b_cpnext = jh->b_cpprev = jh;
+ } else {
+ jh->b_cpnext = transaction->t_checkpoint_io_list;
+ jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
+ jh->b_cpprev->b_cpnext = jh;
+ jh->b_cpnext->b_cpprev = jh;
+ }
+ transaction->t_checkpoint_io_list = jh;
}
/*
* Try to release a checkpointed buffer from its transaction.
- * Returns 1 if we released it.
+ * Returns 1 if we released it and 2 if we also released the
+ * whole transaction.
+ *
* Requires j_list_lock
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
*/
@@ -57,12 +95,11 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
- __journal_remove_checkpoint(jh);
+ ret = __journal_remove_checkpoint(jh) + 1;
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "release");
__brelse(bh);
- ret = 1;
} else {
jbd_unlock_bh_state(bh);
}
@@ -117,83 +154,54 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
}
/*
- * Clean up a transaction's checkpoint list.
- *
- * We wait for any pending IO to complete and make sure any clean
- * buffers are removed from the transaction.
- *
- * Return 1 if we performed any actions which might have destroyed the
- * checkpoint. (journal_remove_checkpoint() deletes the transaction when
- * the last checkpoint buffer is cleansed)
+ * Clean up a transaction's list of buffers submitted for IO.
+ * We wait for any pending IO to complete and remove any clean
+ * buffers. Note that we take the buffers in the opposite ordering
+ * from the one in which they were submitted for IO.
*
* Called with j_list_lock held.
*/
-static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
+static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
{
- struct journal_head *jh, *next_jh, *last_jh;
+ struct journal_head *jh;
struct buffer_head *bh;
- int ret = 0;
-
- assert_spin_locked(&journal->j_list_lock);
- jh = transaction->t_checkpoint_list;
- if (!jh)
- return 0;
-
- last_jh = jh->b_cpprev;
- next_jh = jh;
- do {
- jh = next_jh;
+ tid_t this_tid;
+ int released = 0;
+
+ this_tid = transaction->t_tid;
+restart:
+ /* Did somebody clean up the transaction in the meanwhile? */
+ if (journal->j_checkpoint_transactions != transaction ||
+ transaction->t_tid != this_tid)
+ return;
+ while (!released && transaction->t_checkpoint_io_list) {
+ jh = transaction->t_checkpoint_io_list;
bh = jh2bh(jh);
+ if (!jbd_trylock_bh_state(bh)) {
+ jbd_sync_bh(journal, bh);
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
if (buffer_locked(bh)) {
atomic_inc(&bh->b_count);
spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
__brelse(bh);
- goto out_return_1;
+ spin_lock(&journal->j_list_lock);
+ goto restart;
}
-
/*
- * This is foul
+ * Whatever state the buffer is in now, we know that it has
+ * been written out, so we can drop it from the list
*/
- if (!jbd_trylock_bh_state(bh)) {
- jbd_sync_bh(journal, bh);
- goto out_return_1;
- }
-
- if (jh->b_transaction != NULL) {
- transaction_t *t = jh->b_transaction;
- tid_t tid = t->t_tid;
-
- spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- log_start_commit(journal, tid);
- log_wait_commit(journal, tid);
- goto out_return_1;
- }
-
- /*
- * AKPM: I think the buffer_jbddirty test is redundant - it
- * shouldn't have NULL b_transaction?
- */
- next_jh = jh->b_cpnext;
- if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) {
- BUFFER_TRACE(bh, "remove from checkpoint");
- __journal_remove_checkpoint(jh);
- jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
- __brelse(bh);
- ret = 1;
- } else {
- jbd_unlock_bh_state(bh);
- }
- } while (jh != last_jh);
-
- return ret;
-out_return_1:
- spin_lock(&journal->j_list_lock);
- return 1;
+ released = __journal_remove_checkpoint(jh);
+ jbd_unlock_bh_state(bh);
+ journal_remove_journal_head(bh);
+ __brelse(bh);
+ }
}
#define NR_BATCH 64
@@ -203,9 +211,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
{
int i;
- spin_unlock(&journal->j_list_lock);
ll_rw_block(SWRITE, *batch_count, bhs);
- spin_lock(&journal->j_list_lock);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = bhs[i];
clear_buffer_jwrite(bh);
@@ -221,19 +227,43 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
* Return 1 if something happened which requires us to abort the current
* scan of the checkpoint list.
*
- * Called with j_list_lock held.
+ * Called with j_list_lock held and drops it if 1 is returned
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
*/
-static int __flush_buffer(journal_t *journal, struct journal_head *jh,
- struct buffer_head **bhs, int *batch_count,
- int *drop_count)
+static int __process_buffer(journal_t *journal, struct journal_head *jh,
+ struct buffer_head **bhs, int *batch_count)
{
struct buffer_head *bh = jh2bh(jh);
int ret = 0;
- if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) {
- J_ASSERT_JH(jh, jh->b_transaction == NULL);
+ if (buffer_locked(bh)) {
+ atomic_inc(&bh->b_count);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ wait_on_buffer(bh);
+ /* the journal_head may have gone by now */
+ BUFFER_TRACE(bh, "brelse");
+ __brelse(bh);
+ ret = 1;
+ } else if (jh->b_transaction != NULL) {
+ transaction_t *t = jh->b_transaction;
+ tid_t tid = t->t_tid;
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ log_start_commit(journal, tid);
+ log_wait_commit(journal, tid);
+ ret = 1;
+ } else if (!buffer_dirty(bh)) {
+ J_ASSERT_JH(jh, !buffer_jbddirty(bh));
+ BUFFER_TRACE(bh, "remove from checkpoint");
+ __journal_remove_checkpoint(jh);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ journal_remove_journal_head(bh);
+ __brelse(bh);
+ ret = 1;
+ } else {
/*
* Important: we are about to write the buffer, and
* possibly block, while still holding the journal lock.
@@ -246,45 +276,30 @@ static int __flush_buffer(journal_t *journal, struct journal_head *jh,
J_ASSERT_BH(bh, !buffer_jwrite(bh));
set_buffer_jwrite(bh);
bhs[*batch_count] = bh;
+ __buffer_relink_io(jh);
jbd_unlock_bh_state(bh);
(*batch_count)++;
if (*batch_count == NR_BATCH) {
+ spin_unlock(&journal->j_list_lock);
__flush_batch(journal, bhs, batch_count);
ret = 1;
}
- } else {
- int last_buffer = 0;
- if (jh->b_cpnext == jh) {
- /* We may be about to drop the transaction. Tell the
- * caller that the lists have changed.
- */
- last_buffer = 1;
- }
- if (__try_to_free_cp_buf(jh)) {
- (*drop_count)++;
- ret = last_buffer;
- }
}
return ret;
}
/*
- * Perform an actual checkpoint. We don't write out only enough to
- * satisfy the current blocked requests: rather we submit a reasonably
- * sized chunk of the outstanding data to disk at once for
- * efficiency. __log_wait_for_space() will retry if we didn't free enough.
+ * Perform an actual checkpoint. We take the first transaction on the
+ * list of transactions to be checkpointed and send all its buffers
+ * to disk. We submit larger chunks of data at once.
*
- * However, we _do_ take into account the amount requested so that once
- * the IO has been queued, we can return as soon as enough of it has
- * completed to disk.
- *
* The journal should be locked before calling this function.
*/
int log_do_checkpoint(journal_t *journal)
{
+ transaction_t *transaction;
+ tid_t this_tid;
int result;
- int batch_count = 0;
- struct buffer_head *bhs[NR_BATCH];
jbd_debug(1, "Start checkpoint\n");
@@ -299,79 +314,68 @@ int log_do_checkpoint(journal_t *journal)
return result;
/*
- * OK, we need to start writing disk blocks. Try to free up a
- * quarter of the log in a single checkpoint if we can.
+ * OK, we need to start writing disk blocks. Take one transaction
+ * and write it.
*/
+ spin_lock(&journal->j_list_lock);
+ if (!journal->j_checkpoint_transactions)
+ goto out;
+ transaction = journal->j_checkpoint_transactions;
+ this_tid = transaction->t_tid;
+restart:
/*
- * AKPM: check this code. I had a feeling a while back that it
- * degenerates into a busy loop at unmount time.
+ * If someone cleaned up this transaction while we slept, we're
+ * done (maybe it's a new transaction, but it fell at the same
+ * address).
*/
- spin_lock(&journal->j_list_lock);
- while (journal->j_checkpoint_transactions) {
- transaction_t *transaction;
- struct journal_head *jh, *last_jh, *next_jh;
- int drop_count = 0;
- int cleanup_ret, retry = 0;
- tid_t this_tid;
-
- transaction = journal->j_checkpoint_transactions;
- this_tid = transaction->t_tid;
- jh = transaction->t_checkpoint_list;
- last_jh = jh->b_cpprev;
- next_jh = jh;
- do {
+ if (journal->j_checkpoint_transactions == transaction &&
+ transaction->t_tid == this_tid) {
+ int batch_count = 0;
+ struct buffer_head *bhs[NR_BATCH];
+ struct journal_head *jh;
+ int retry = 0;
+
+ while (!retry && transaction->t_checkpoint_list) {
struct buffer_head *bh;
- jh = next_jh;
- next_jh = jh->b_cpnext;
+ jh = transaction->t_checkpoint_list;
bh = jh2bh(jh);
if (!jbd_trylock_bh_state(bh)) {
jbd_sync_bh(journal, bh);
- spin_lock(&journal->j_list_lock);
retry = 1;
break;
}
- retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count);
- if (cond_resched_lock(&journal->j_list_lock)) {
+ retry = __process_buffer(journal, jh, bhs, &batch_count);
+ if (!retry && lock_need_resched(&journal->j_list_lock)) {
+ spin_unlock(&journal->j_list_lock);
retry = 1;
break;
}
- } while (jh != last_jh && !retry);
+ }
if (batch_count) {
+ if (!retry) {
+ spin_unlock(&journal->j_list_lock);
+ retry = 1;
+ }
__flush_batch(journal, bhs, &batch_count);
- retry = 1;
}
+ if (retry) {
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
/*
- * If someone cleaned up this transaction while we slept, we're
- * done
- */
- if (journal->j_checkpoint_transactions != transaction)
- break;
- if (retry)
- continue;
- /*
- * Maybe it's a new transaction, but it fell at the same
- * address
- */
- if (transaction->t_tid != this_tid)
- continue;
- /*
- * We have walked the whole transaction list without
- * finding anything to write to disk. We had better be
- * able to make some progress or we are in trouble.
+ * Now we have cleaned up the first transaction's checkpoint
+ * list. Let's clean up its checkpoint io list as well.
*/
- cleanup_ret = __cleanup_transaction(journal, transaction);
- J_ASSERT(drop_count != 0 || cleanup_ret != 0);
- if (journal->j_checkpoint_transactions != transaction)
- break;
+ __wait_cp_io(journal, transaction);
}
+out:
spin_unlock(&journal->j_list_lock);
result = cleanup_journal_tail(journal);
if (result < 0)
return result;
-
return 0;
}
@@ -456,52 +460,98 @@ int cleanup_journal_tail(journal_t *journal)
/* Checkpoint list management */
/*
+ * journal_clean_one_cp_list
+ *
+ * Find all the written-back checkpoint buffers in the given list and release them.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns number of buffers reaped (for debug)
+ */
+
+static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+{
+ struct journal_head *last_jh;
+ struct journal_head *next_jh = jh;
+ int ret, freed = 0;
+
+ *released = 0;
+ if (!jh)
+ return 0;
+
+ last_jh = jh->b_cpprev;
+ do {
+ jh = next_jh;
+ next_jh = jh->b_cpnext;
+ /* Use trylock because of the ranking */
+ if (jbd_trylock_bh_state(jh2bh(jh))) {
+ ret = __try_to_free_cp_buf(jh);
+ if (ret) {
+ freed++;
+ if (ret == 2) {
+ *released = 1;
+ return freed;
+ }
+ }
+ }
+ /*
+ * This function only frees up some memory
+ * if possible, so we don't have an obligation
+ * to finish processing. Bail out if preemption
+ * is requested:
+ */
+ if (need_resched())
+ return freed;
+ } while (jh != last_jh);
+
+ return freed;
+}
+
+/*
* journal_clean_checkpoint_list
*
* Find all the written-back checkpoint buffers in the journal and release them.
*
* Called with the journal locked.
* Called with j_list_lock held.
- * Returns number of bufers reaped (for debug)
+ * Returns number of buffers reaped (for debug)
*/
int __journal_clean_checkpoint_list(journal_t *journal)
{
transaction_t *transaction, *last_transaction, *next_transaction;
int ret = 0;
+ int released;
transaction = journal->j_checkpoint_transactions;
- if (transaction == 0)
+ if (!transaction)
goto out;
last_transaction = transaction->t_cpprev;
next_transaction = transaction;
do {
- struct journal_head *jh;
-
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
- jh = transaction->t_checkpoint_list;
- if (jh) {
- struct journal_head *last_jh = jh->b_cpprev;
- struct journal_head *next_jh = jh;
-
- do {
- jh = next_jh;
- next_jh = jh->b_cpnext;
- /* Use trylock because of the ranknig */
- if (jbd_trylock_bh_state(jh2bh(jh)))
- ret += __try_to_free_cp_buf(jh);
- /*
- * This function only frees up some memory
- * if possible so we dont have an obligation
- * to finish processing. Bail out if preemption
- * requested:
- */
- if (need_resched())
- goto out;
- } while (jh != last_jh);
- }
+ ret += journal_clean_one_cp_list(transaction->
+ t_checkpoint_list, &released);
+ /*
+ * This function only frees up some memory if possible so we
+ * don't have an obligation to finish processing. Bail out if
+ * preemption is requested:
+ */
+ if (need_resched())
+ goto out;
+ if (released)
+ continue;
+ /*
+ * It is essential to be as careful here as in the
+ * t_checkpoint_list case when removing a buffer from the
+ * list, as we may see not-yet-submitted buffers on the io_list
+ */
+ ret += journal_clean_one_cp_list(transaction->
+ t_checkpoint_io_list, &released);
+ if (need_resched())
+ goto out;
} while (transaction != last_transaction);
out:
return ret;
@@ -516,18 +566,22 @@ out:
* buffer updates committed in that transaction have safely been stored
* elsewhere on disk. To achieve this, all of the buffers in a
* transaction need to be maintained on the transaction's checkpoint
- * list until they have been rewritten, at which point this function is
+ * lists until they have been rewritten, at which point this function is
* called to remove the buffer from the existing transaction's
- * checkpoint list.
+ * checkpoint lists.
+ *
+ * The function returns 1 if it frees the transaction, 0 otherwise.
*
* This function is called with the journal locked.
* This function is called with j_list_lock held.
+ * This function is called with jbd_lock_bh_state(jh2bh(jh))
*/
-void __journal_remove_checkpoint(struct journal_head *jh)
+int __journal_remove_checkpoint(struct journal_head *jh)
{
transaction_t *transaction;
journal_t *journal;
+ int ret = 0;
JBUFFER_TRACE(jh, "entry");
@@ -538,8 +592,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
journal = transaction->t_journal;
__buffer_unlink(jh);
+ jh->b_cp_transaction = NULL;
- if (transaction->t_checkpoint_list != NULL)
+ if (transaction->t_checkpoint_list != NULL ||
+ transaction->t_checkpoint_io_list != NULL)
goto out;
JBUFFER_TRACE(jh, "transaction has no more buffers");
@@ -565,8 +621,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
/* Just in case anybody was waiting for more transactions to be
checkpointed... */
wake_up(&journal->j_wait_logspace);
+ ret = 1;
out:
JBUFFER_TRACE(jh, "exit");
+ return ret;
}
/*
@@ -628,6 +686,7 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
J_ASSERT(transaction->t_shadow_list == NULL);
J_ASSERT(transaction->t_log_list == NULL);
J_ASSERT(transaction->t_checkpoint_list == NULL);
+ J_ASSERT(transaction->t_checkpoint_io_list == NULL);
J_ASSERT(transaction->t_updates == 0);
J_ASSERT(journal->j_committing_transaction != transaction);
J_ASSERT(journal->j_running_transaction != transaction);
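For orientation while reading the checkpoint.c changes above: each transaction now keeps its checkpoint buffers on two circular lists of journal_heads, the second of which holds buffers already submitted for IO. A sketch of the two fields involved (the struct name is hypothetical; the real fields are assumed to live in transaction_t in include/linux/jbd.h, matching their use above):

    struct transaction_checkpoint_lists {
            /* dirty buffers not yet submitted for writeback */
            struct journal_head *t_checkpoint_list;
            /* buffers moved here by __buffer_relink_io() once submitted;
             * __wait_cp_io() waits on these and reaps the clean ones */
            struct journal_head *t_checkpoint_io_list;
    };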
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 002ad2bbc769..0971814c38b8 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -790,11 +790,22 @@ restart_loop:
jbd_unlock_bh_state(bh);
} else {
J_ASSERT_BH(bh, !buffer_dirty(bh));
- J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
- __journal_unfile_buffer(jh);
- jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh); /* needs a brelse */
- release_buffer_page(bh);
+ /* A buffer on the BJ_Forget list that is not jbddirty
+ * has been freed by this transaction and hence it
+ * could not have been reallocated until this
+ * transaction has committed. *BUT* it could be
+ * reallocated once we have written all the data to
+ * disk and before we process the buffer on the
+ * BJ_Forget list. */
+ JBUFFER_TRACE(jh, "refile or unfile freed buffer");
+ __journal_refile_buffer(jh);
+ if (!jh->b_transaction) {
+ jbd_unlock_bh_state(bh);
+ /* needs a brelse */
+ journal_remove_journal_head(bh);
+ release_buffer_page(bh);
+ } else
+ jbd_unlock_bh_state(bh);
}
cond_resched_lock(&journal->j_list_lock);
}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index c609f5034fcd..508b2ea91f43 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -227,7 +227,8 @@ repeat_locked:
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
out:
- kfree(new_transaction);
+ if (unlikely(new_transaction)) /* It's usually NULL */
+ kfree(new_transaction);
return ret;
}
@@ -724,7 +725,8 @@ done:
journal_cancel_revoke(handle, jh);
out:
- kfree(frozen_buffer);
+ if (unlikely(frozen_buffer)) /* It's usually NULL */
+ kfree(frozen_buffer);
JBUFFER_TRACE(jh, "exit");
return error;
@@ -903,7 +905,8 @@ repeat:
jbd_unlock_bh_state(bh);
out:
journal_put_journal_head(jh);
- kfree(committed_data);
+ if (unlikely(committed_data))
+ kfree(committed_data);
return err;
}
@@ -2038,7 +2041,8 @@ void __journal_refile_buffer(struct journal_head *jh)
__journal_temp_unlink_buffer(jh);
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
- __journal_file_buffer(jh, jh->b_transaction, BJ_Metadata);
+ __journal_file_buffer(jh, jh->b_transaction,
+ was_dirty ? BJ_Metadata : BJ_Reserved);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
if (was_dirty)
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 020cc097c539..9e46ea6da752 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -377,9 +377,9 @@ jffs_new_inode(const struct inode * dir, struct jffs_raw_inode *raw_inode,
/* Get statistics of the file system. */
static int
-jffs_statfs(struct super_block *sb, struct kstatfs *buf)
+jffs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct jffs_control *c = (struct jffs_control *) sb->s_fs_info;
+ struct jffs_control *c = (struct jffs_control *) dentry->d_sb->s_fs_info;
struct jffs_fmcontrol *fmc;
lock_kernel();
@@ -1785,10 +1785,11 @@ static struct super_operations jffs_ops =
.remount_fs = jffs_remount,
};
-static struct super_block *jffs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int jffs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, jffs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, jffs_fill_super,
+ mnt);
}
static struct file_system_type jffs_fs_type = {
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 7b6c24b14f85..2900ec3ec3af 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -192,9 +192,9 @@ int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
return rc;
}
-int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
+int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
unsigned long avail;
buf->f_type = JFFS2_SUPER_MAGIC;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index cd4021bcb944..6b5223565405 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -175,7 +175,7 @@ void jffs2_clear_inode (struct inode *);
void jffs2_dirty_inode(struct inode *inode);
struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
struct jffs2_raw_inode *ri);
-int jffs2_statfs (struct super_block *, struct kstatfs *);
+int jffs2_statfs (struct dentry *, struct kstatfs *);
void jffs2_write_super (struct super_block *);
int jffs2_remount_fs (struct super_block *, int *, char *);
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 9d0521451f59..2378a662c256 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -111,9 +111,10 @@ static int jffs2_sb_set(struct super_block *sb, void *data)
return 0;
}
-static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data, struct mtd_info *mtd)
+static int jffs2_get_sb_mtd(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct mtd_info *mtd,
+ struct vfsmount *mnt)
{
struct super_block *sb;
struct jffs2_sb_info *c;
@@ -121,19 +122,20 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
c = kmalloc(sizeof(*c), GFP_KERNEL);
if (!c)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
memset(c, 0, sizeof(*c));
c->mtd = mtd;
sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
if (IS_ERR(sb))
- goto out_put;
+ goto out_error;
if (sb->s_root) {
/* New mountpoint for JFFS2 which is already mounted */
D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name));
+ ret = simple_set_mnt(mnt, sb);
goto out_put;
}
@@ -161,44 +163,47 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
/* Failure case... */
up_write(&sb->s_umount);
deactivate_super(sb);
- return ERR_PTR(ret);
+ return ret;
}
sb->s_flags |= MS_ACTIVE;
- return sb;
+ return simple_set_mnt(mnt, sb);
+out_error:
+ ret = PTR_ERR(sb);
out_put:
kfree(c);
put_mtd_device(mtd);
- return sb;
+ return ret;
}
-static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data, int mtdnr)
+static int jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, int mtdnr,
+ struct vfsmount *mnt)
{
struct mtd_info *mtd;
mtd = get_mtd_device(NULL, mtdnr);
if (!mtd) {
D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr));
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+ return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt);
}
-static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int jffs2_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
int err;
struct nameidata nd;
int mtdnr;
if (!dev_name)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name));
@@ -220,7 +225,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
mtd = get_mtd_device(NULL, mtdnr);
if (mtd) {
if (!strcmp(mtd->name, dev_name+4))
- return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+ return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt);
put_mtd_device(mtd);
}
}
@@ -233,7 +238,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
if (!*endptr) {
/* It was a valid number */
D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr));
- return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+ return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt);
}
}
}
@@ -247,7 +252,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
err, nd.dentry->d_inode));
if (err)
- return ERR_PTR(err);
+ return err;
err = -EINVAL;
@@ -269,11 +274,11 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
mtdnr = iminor(nd.dentry->d_inode);
path_release(&nd);
- return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+ return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt);
out:
path_release(&nd);
- return ERR_PTR(err);
+ return err;
}
static void jffs2_put_super (struct super_block *sb)
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 2b220dd6b4e7..7f6e88039700 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -632,10 +632,9 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
}
SetPageUptodate(page);
} else {
- page = read_cache_page(mapping, page_index,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, page_index, NULL);
if (IS_ERR(page) || !PageUptodate(page)) {
- jfs_err("read_cache_page failed!");
+ jfs_err("read_mapping_page failed!");
return NULL;
}
lock_page(page);
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index db6f41d6dd60..73d2aba084c6 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -139,9 +139,9 @@ static void jfs_destroy_inode(struct inode *inode)
kmem_cache_free(jfs_inode_cachep, ji);
}
-static int jfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct jfs_sb_info *sbi = JFS_SBI(sb);
+ struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
s64 maxinodes;
struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;
@@ -565,10 +565,11 @@ static void jfs_unlockfs(struct super_block *sb)
}
}
-static struct super_block *jfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int jfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super,
+ mnt);
}
static int jfs_sync_fs(struct super_block *sb, int wait)
diff --git a/fs/libfs.c b/fs/libfs.c
index 7145ba7a48d0..1b1156381787 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -20,9 +20,9 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
-int simple_statfs(struct super_block *sb, struct kstatfs *buf)
+int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- buf->f_type = sb->s_magic;
+ buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_namelen = NAME_MAX;
return 0;
@@ -196,9 +196,9 @@ struct inode_operations simple_dir_inode_operations = {
* Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
* will never be mountable)
*/
-struct super_block *
-get_sb_pseudo(struct file_system_type *fs_type, char *name,
- struct super_operations *ops, unsigned long magic)
+int get_sb_pseudo(struct file_system_type *fs_type, char *name,
+ struct super_operations *ops, unsigned long magic,
+ struct vfsmount *mnt)
{
struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
static struct super_operations default_ops = {.statfs = simple_statfs};
@@ -207,7 +207,7 @@ get_sb_pseudo(struct file_system_type *fs_type, char *name,
struct qstr d_name = {.name = name, .len = strlen(name)};
if (IS_ERR(s))
- return s;
+ return PTR_ERR(s);
s->s_flags = MS_NOUSER;
s->s_maxbytes = ~0ULL;
@@ -232,12 +232,12 @@ get_sb_pseudo(struct file_system_type *fs_type, char *name,
d_instantiate(dentry, root);
s->s_root = dentry;
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
Enomem:
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
diff --git a/fs/locks.c b/fs/locks.c
index 69435c68c1ed..1ad29c9b6252 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -703,7 +703,7 @@ EXPORT_SYMBOL(posix_test_lock);
* from a broken NFS client. But broken NFS clients have a lot more to
* worry about than proper deadlock detection anyway... --okir
*/
-int posix_locks_deadlock(struct file_lock *caller_fl,
+static int posix_locks_deadlock(struct file_lock *caller_fl,
struct file_lock *block_fl)
{
struct list_head *tmp;
@@ -722,8 +722,6 @@ next_task:
return 0;
}
-EXPORT_SYMBOL(posix_locks_deadlock);
-
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
* at the head of the list, but that's secret knowledge known only to
* flock_lock_file and posix_lock_file.
@@ -794,7 +792,8 @@ out:
static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
struct file_lock *fl;
- struct file_lock *new_fl, *new_fl2;
+ struct file_lock *new_fl = NULL;
+ struct file_lock *new_fl2 = NULL;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
struct file_lock **before;
@@ -803,9 +802,15 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
+ *
+ * In some cases we can be sure that no new locks will be needed
*/
- new_fl = locks_alloc_lock();
- new_fl2 = locks_alloc_lock();
+ if (!(request->fl_flags & FL_ACCESS) &&
+ (request->fl_type != F_UNLCK ||
+ request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
+ new_fl = locks_alloc_lock();
+ new_fl2 = locks_alloc_lock();
+ }
lock_kernel();
if (request->fl_type != F_UNLCK) {
@@ -834,14 +839,7 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
if (request->fl_flags & FL_ACCESS)
goto out;
- error = -ENOLCK; /* "no luck" */
- if (!(new_fl && new_fl2))
- goto out;
-
/*
- * We've allocated the new locks in advance, so there are no
- * errors possible (and no blocking operations) from here on.
- *
* Find the first old lock with the same owner as the new lock.
*/
@@ -938,10 +936,25 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
before = &fl->fl_next;
}
+ /*
+ * The above code only modifies existing locks in case of
+ * merging or replacing. If new lock(s) need to be inserted,
+ * all modifications are done below this point, so it is still
+ * safe to bail out.
+ */
+ error = -ENOLCK; /* "no luck" */
+ if (right && left == right && !new_fl2)
+ goto out;
+
error = 0;
if (!added) {
if (request->fl_type == F_UNLCK)
goto out;
+
+ if (!new_fl) {
+ error = -ENOLCK;
+ goto out;
+ }
locks_copy_lock(new_fl, request);
locks_insert_lock(before, new_fl);
new_fl = NULL;
@@ -1881,19 +1894,18 @@ out:
*/
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
- struct file_lock lock, **before;
+ struct file_lock lock;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
- before = &filp->f_dentry->d_inode->i_flock;
- if (*before == NULL)
+ if (!filp->f_dentry->d_inode->i_flock)
return;
lock.fl_type = F_UNLCK;
- lock.fl_flags = FL_POSIX;
+ lock.fl_flags = FL_POSIX | FL_CLOSE;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
lock.fl_owner = owner;
@@ -1902,25 +1914,11 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
lock.fl_ops = NULL;
lock.fl_lmops = NULL;
- if (filp->f_op && filp->f_op->lock != NULL) {
+ if (filp->f_op && filp->f_op->lock != NULL)
filp->f_op->lock(filp, F_SETLK, &lock);
- goto out;
- }
+ else
+ posix_lock_file(filp, &lock);
- /* Can't use posix_lock_file here; we need to remove it no matter
- * which pid we have.
- */
- lock_kernel();
- while (*before != NULL) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) {
- locks_delete_lock(before);
- continue;
- }
- before = &fl->fl_next;
- }
- unlock_kernel();
-out:
if (lock.fl_ops && lock.fl_ops->fl_release_private)
lock.fl_ops->fl_release_private(&lock);
}
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 69224d1fe043..2b0a389d1987 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -60,8 +60,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 2dcccf1d1b7f..a6fb509b7341 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -19,7 +19,7 @@
static void minix_read_inode(struct inode * inode);
static int minix_write_inode(struct inode * inode, int wait);
-static int minix_statfs(struct super_block *sb, struct kstatfs *buf);
+static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);
static void minix_delete_inode(struct inode *inode)
@@ -296,11 +296,11 @@ out_bad_sb:
return -EINVAL;
}
-static int minix_statfs(struct super_block *sb, struct kstatfs *buf)
+static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct minix_sb_info *sbi = minix_sb(sb);
- buf->f_type = sb->s_magic;
- buf->f_bsize = sb->s_blocksize;
+ struct minix_sb_info *sbi = minix_sb(dentry->d_sb);
+ buf->f_type = dentry->d_sb->s_magic;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
buf->f_bfree = minix_count_free_blocks(sbi);
buf->f_bavail = buf->f_bfree;
@@ -559,10 +559,11 @@ void minix_truncate(struct inode * inode)
V2_minix_truncate(inode);
}
-static struct super_block *minix_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int minix_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, minix_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, minix_fill_super,
+ mnt);
}
static struct file_system_type minix_fs_type = {
diff --git a/fs/mpage.c b/fs/mpage.c
index 9bf2eb30e6f4..1e4598247d0b 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -707,9 +707,9 @@ mpage_writepages(struct address_space *mapping,
struct pagevec pvec;
int nr_pages;
pgoff_t index;
- pgoff_t end = -1; /* Inclusive */
+ pgoff_t end; /* Inclusive */
int scanned = 0;
- int is_range = 0;
+ int range_whole = 0;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
@@ -721,16 +721,14 @@ mpage_writepages(struct address_space *mapping,
writepage = mapping->a_ops->writepage;
pagevec_init(&pvec, 0);
- if (wbc->sync_mode == WB_SYNC_NONE) {
+ if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
} else {
- index = 0; /* whole-file sweep */
- scanned = 1;
- }
- if (wbc->start || wbc->end) {
- index = wbc->start >> PAGE_CACHE_SHIFT;
- end = wbc->end >> PAGE_CACHE_SHIFT;
- is_range = 1;
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
scanned = 1;
}
retry:
@@ -759,7 +757,7 @@ retry:
continue;
}
- if (unlikely(is_range) && page->index > end) {
+ if (!wbc->range_cyclic && page->index > end) {
done = 1;
unlock_page(page);
continue;
@@ -810,7 +808,7 @@ retry:
index = 0;
goto retry;
}
- if (!is_range)
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
if (bio)
mpage_bio_submit(WRITE, bio);
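The mpage hunk above replaces the old wbc->start/wbc->end pair and the local is_range flag with explicit range fields. A minimal sketch of how a caller would now request a whole-file sweep, assuming the matching include/linux/writeback.h change (only the relevant fields shown):

    struct writeback_control wbc = {
            .sync_mode      = WB_SYNC_ALL,
            .nr_to_write    = LONG_MAX,
            .range_start    = 0,
            .range_end      = LLONG_MAX,    /* start 0, end LLONG_MAX => whole file */
            .range_cyclic   = 0,            /* don't resume from writeback_index */
    };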
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
index 5b76ccd19e3f..9e44158a7540 100644
--- a/fs/msdos/namei.c
+++ b/fs/msdos/namei.c
@@ -661,11 +661,12 @@ static int msdos_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *msdos_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int msdos_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, msdos_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, msdos_fill_super,
+ mnt);
}
static struct file_system_type msdos_fs_type = {
diff --git a/fs/namei.c b/fs/namei.c
index 184fe4acf824..bb4a3e40e432 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2577,8 +2577,7 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
struct page * page;
struct address_space *mapping = dentry->d_inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto sync_fail;
wait_on_page_locked(page);
diff --git a/fs/namespace.c b/fs/namespace.c
index bf478addb852..c13072a5f1ee 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -86,6 +86,15 @@ struct vfsmount *alloc_vfsmnt(const char *name)
return mnt;
}
+int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
+{
+ mnt->mnt_sb = sb;
+ mnt->mnt_root = dget(sb->s_root);
+ return 0;
+}
+
+EXPORT_SYMBOL(simple_set_mnt);
+
void free_vfsmnt(struct vfsmount *mnt)
{
kfree(mnt->mnt_devname);
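simple_set_mnt() added above is what a ->get_sb() implementation that builds its own superblock (as the jffs2 hunks above and the nfs hunks below do) calls to hand the result back through the vfsmount. A minimal illustrative sketch, with error paths trimmed and a hypothetical function name:

    static int example_sget_get_sb(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data,
                                   struct vfsmount *mnt)
    {
            struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

            if (IS_ERR(s))
                    return PTR_ERR(s);
            /* ... fill in s, set up s->s_root ... */
            s->s_flags |= MS_ACTIVE;
            return simple_set_mnt(mnt, s);  /* takes a reference on s->s_root */
    }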
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index a1f3e972c6ef..90d2ea28f333 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -39,7 +39,7 @@
static void ncp_delete_inode(struct inode *);
static void ncp_put_super(struct super_block *);
-static int ncp_statfs(struct super_block *, struct kstatfs *);
+static int ncp_statfs(struct dentry *, struct kstatfs *);
static kmem_cache_t * ncp_inode_cachep;
@@ -724,13 +724,14 @@ static void ncp_put_super(struct super_block *sb)
kfree(server);
}
-static int ncp_statfs(struct super_block *sb, struct kstatfs *buf)
+static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct dentry* d;
struct inode* i;
struct ncp_inode_info* ni;
struct ncp_server* s;
struct ncp_volume_info vi;
+ struct super_block *sb = dentry->d_sb;
int err;
__u8 dh;
@@ -957,10 +958,10 @@ out:
return result;
}
-static struct super_block *ncp_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ncp_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, ncp_fill_super);
+ return get_sb_nodev(fs_type, flags, data, ncp_fill_super, mnt);
}
static struct file_system_type ncp_fs_type = {
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index fade02c15e6e..fa05c027ea11 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -43,7 +43,7 @@ static int nfs_file_mmap(struct file *, struct vm_area_struct *);
static ssize_t nfs_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
static ssize_t nfs_file_read(struct kiocb *, char __user *, size_t, loff_t);
static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
-static int nfs_file_flush(struct file *);
+static int nfs_file_flush(struct file *, fl_owner_t id);
static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
static int nfs_check_flags(int flags);
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
@@ -188,7 +188,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
*
*/
static int
-nfs_file_flush(struct file *file)
+nfs_file_flush(struct file *file, fl_owner_t id)
{
struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
struct inode *inode = file->f_dentry->d_inode;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d0b991a92327..937fbfc381bb 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -65,7 +65,7 @@ static int nfs_write_inode(struct inode *,int);
static void nfs_delete_inode(struct inode *);
static void nfs_clear_inode(struct inode *);
static void nfs_umount_begin(struct super_block *);
-static int nfs_statfs(struct super_block *, struct kstatfs *);
+static int nfs_statfs(struct dentry *, struct kstatfs *);
static int nfs_show_options(struct seq_file *, struct vfsmount *);
static int nfs_show_stats(struct seq_file *, struct vfsmount *);
static void nfs_zap_acl_cache(struct inode *);
@@ -534,8 +534,9 @@ nfs_fill_super(struct super_block *sb, struct nfs_mount_data *data, int silent)
}
static int
-nfs_statfs(struct super_block *sb, struct kstatfs *buf)
+nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct nfs_server *server = NFS_SB(sb);
unsigned char blockbits;
unsigned long blockres;
@@ -1690,8 +1691,8 @@ static int nfs_compare_super(struct super_block *sb, void *data)
return !nfs_compare_fh(&old->fh, &server->fh);
}
-static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
+static int nfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
int error;
struct nfs_server *server = NULL;
@@ -1699,14 +1700,14 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
struct nfs_fh *root;
struct nfs_mount_data *data = raw_data;
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
if (data == NULL) {
dprintk("%s: missing data argument\n", __FUNCTION__);
- goto out_err;
+ goto out_err_noserver;
}
if (data->version <= 0 || data->version > NFS_MOUNT_VERSION) {
dprintk("%s: bad mount version\n", __FUNCTION__);
- goto out_err;
+ goto out_err_noserver;
}
switch (data->version) {
case 1:
@@ -1718,7 +1719,7 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
dprintk("%s: mount structure version %d does not support NFSv3\n",
__FUNCTION__,
data->version);
- goto out_err;
+ goto out_err_noserver;
}
data->root.size = NFS2_FHSIZE;
memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
@@ -1727,24 +1728,24 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
dprintk("%s: mount structure version %d does not support strong security\n",
__FUNCTION__,
data->version);
- goto out_err;
+ goto out_err_noserver;
}
case 5:
memset(data->context, 0, sizeof(data->context));
}
#ifndef CONFIG_NFS_V3
/* If NFSv3 is not compiled in, return -EPROTONOSUPPORT */
- s = ERR_PTR(-EPROTONOSUPPORT);
+ error = -EPROTONOSUPPORT;
if (data->flags & NFS_MOUNT_VER3) {
dprintk("%s: NFSv3 not compiled into kernel\n", __FUNCTION__);
- goto out_err;
+ goto out_err_noserver;
}
#endif /* CONFIG_NFS_V3 */
- s = ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
server = kzalloc(sizeof(struct nfs_server), GFP_KERNEL);
if (!server)
- goto out_err;
+ goto out_err_noserver;
/* Zero out the NFS state stuff */
init_nfsv4_state(server);
server->client = server->client_sys = server->client_acl = ERR_PTR(-EINVAL);
@@ -1754,7 +1755,7 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
root->size = data->root.size;
else
root->size = NFS2_FHSIZE;
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
if (root->size > sizeof(root->data)) {
dprintk("%s: invalid root filehandle\n", __FUNCTION__);
goto out_err;
@@ -1770,15 +1771,20 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
}
/* Fire up rpciod if not yet running */
- s = ERR_PTR(rpciod_up());
- if (IS_ERR(s)) {
- dprintk("%s: couldn't start rpciod! Error = %ld\n",
- __FUNCTION__, PTR_ERR(s));
+ error = rpciod_up();
+ if (error < 0) {
+ dprintk("%s: couldn't start rpciod! Error = %d\n",
+ __FUNCTION__, error);
goto out_err;
}
s = sget(fs_type, nfs_compare_super, nfs_set_super, server);
- if (IS_ERR(s) || s->s_root)
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
+ goto out_err_rpciod;
+ }
+
+ if (s->s_root)
goto out_rpciod_down;
s->s_flags = flags;
@@ -1787,15 +1793,22 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
+
out_rpciod_down:
rpciod_down();
+ kfree(server);
+ return simple_set_mnt(mnt, s);
+
+out_err_rpciod:
+ rpciod_down();
out_err:
kfree(server);
- return s;
+out_err_noserver:
+ return error;
}
static void nfs_kill_super(struct super_block *s)
@@ -2032,8 +2045,8 @@ nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen)
return dst;
}
-static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
+static int nfs4_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
int error;
struct nfs_server *server;
@@ -2043,16 +2056,16 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
if (data == NULL) {
dprintk("%s: missing data argument\n", __FUNCTION__);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (data->version <= 0 || data->version > NFS4_MOUNT_VERSION) {
dprintk("%s: bad mount version\n", __FUNCTION__);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
server = kzalloc(sizeof(struct nfs_server), GFP_KERNEL);
if (!server)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
/* Zero out the NFS state stuff */
init_nfsv4_state(server);
server->client = server->client_sys = server->client_acl = ERR_PTR(-EINVAL);
@@ -2074,33 +2087,41 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
/* We now require that the mount process passes the remote address */
if (data->host_addrlen != sizeof(server->addr)) {
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
goto out_free;
}
if (copy_from_user(&server->addr, data->host_addr, sizeof(server->addr))) {
- s = ERR_PTR(-EFAULT);
+ error = -EFAULT;
goto out_free;
}
if (server->addr.sin_family != AF_INET ||
server->addr.sin_addr.s_addr == INADDR_ANY) {
dprintk("%s: mount program didn't pass remote IP address!\n",
__FUNCTION__);
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
goto out_free;
}
/* Fire up rpciod if not yet running */
- s = ERR_PTR(rpciod_up());
- if (IS_ERR(s)) {
- dprintk("%s: couldn't start rpciod! Error = %ld\n",
- __FUNCTION__, PTR_ERR(s));
+ error = rpciod_up();
+ if (error < 0) {
+ dprintk("%s: couldn't start rpciod! Error = %d\n",
+ __FUNCTION__, error);
goto out_free;
}
s = sget(fs_type, nfs4_compare_super, nfs_set_super, server);
-
- if (IS_ERR(s) || s->s_root)
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
goto out_free;
+ }
+
+ if (s->s_root) {
+ kfree(server->mnt_path);
+ kfree(server->hostname);
+ kfree(server);
+ return simple_set_mnt(mnt, s);
+ }
s->s_flags = flags;
@@ -2108,17 +2129,17 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
out_err:
- s = (struct super_block *)p;
+ error = PTR_ERR(p);
out_free:
kfree(server->mnt_path);
kfree(server->hostname);
kfree(server);
- return s;
+ return error;
}
static void nfs4_kill_super(struct super_block *sb)
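
Not visible in this diff, but helpful for reading the converted get_sb implementations above: simple_set_mnt() is presumably a small helper that attaches an active superblock to the preallocated vfsmount and returns 0, roughly:

int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);	/* pin the root dentry */
	return 0;
}

which is why the success paths above can simply end with "return simple_set_mnt(mnt, s);".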
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index de3998f15f10..5446a0861d1d 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1310,7 +1310,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
- status = vfs_statfs(dentry->d_inode->i_sb, &statfs);
+ status = vfs_statfs(dentry, &statfs);
if (status)
goto out_nfserr;
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3ef017b3b5bd..a1810e6a93e5 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -494,10 +494,10 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
return simple_fill_super(sb, 0x6e667364, nfsd_files);
}
-static struct super_block *nfsd_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int nfsd_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, nfsd_fill_super);
+ return get_sb_single(fs_type, flags, data, nfsd_fill_super, mnt);
}
static struct file_system_type nfsd_fs_type = {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 1d65f13f458c..245eaa1fb59b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1737,7 +1737,7 @@ int
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat)
{
int err = fh_verify(rqstp, fhp, 0, MAY_NOP);
- if (!err && vfs_statfs(fhp->fh_dentry->d_inode->i_sb,stat))
+ if (!err && vfs_statfs(fhp->fh_dentry,stat))
err = nfserr_io;
return err;
}
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 3b74e66ca2ff..325ce261a107 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -86,8 +86,7 @@ static inline void ntfs_unmap_page(struct page *page)
static inline struct page *ntfs_map_page(struct address_space *mapping,
unsigned long index)
{
- struct page *page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, index, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
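
The read_cache_page() conversions in this and the following files rely on a read_mapping_page() helper that is not defined in this diff; it is presumably a thin wrapper in <linux/pagemap.h> along these lines:

static inline struct page *read_mapping_page(struct address_space *mapping,
					     unsigned long index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return read_cache_page(mapping, index, filler, data);
}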
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 1663f5c3c6aa..6708e1d68a9e 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -2529,8 +2529,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
end >>= PAGE_CACHE_SHIFT;
/* If there is a first partial page, need to do it the slow way. */
if (start_ofs) {
- page = read_cache_page(mapping, idx,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
"page (sync error, index 0x%lx).", idx);
@@ -2600,8 +2599,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
}
/* If there is a last partial page, need to do it the slow way. */
if (end_ofs) {
- page = read_cache_page(mapping, idx,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
"(sync error, index 0x%lx).", idx);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 36e1e136bb0c..88292f9e4b9b 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -231,8 +231,7 @@ do_non_resident_extend:
* Read the page. If the page is not present, this will zero
* the uninitialized regions for us.
*/
- page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, index, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto init_err_out;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 27833f6df49f..0e14acea3f8b 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2601,10 +2601,10 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
/**
* ntfs_statfs - return information about mounted NTFS volume
- * @sb: super block of mounted volume
+ * @dentry: dentry from mounted volume
* @sfs: statfs structure in which to return the information
*
- * Return information about the mounted NTFS volume @sb in the statfs structure
+ * Return information about the mounted NTFS volume @dentry in the statfs structure
* pointed to by @sfs (this is initialized with zeros before ntfs_statfs is
* called). We interpret the values to be correct of the moment in time at
* which we are called. Most values are variable otherwise and this isn't just
@@ -2617,8 +2617,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
*
* Return 0 on success or -errno on error.
*/
-static int ntfs_statfs(struct super_block *sb, struct kstatfs *sfs)
+static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
{
+ struct super_block *sb = dentry->d_sb;
s64 size;
ntfs_volume *vol = NTFS_SB(sb);
ntfs_inode *mft_ni = NTFS_I(vol->mft_ino);
@@ -3093,10 +3094,11 @@ struct kmem_cache *ntfs_index_ctx_cache;
/* Driver wide mutex. */
DEFINE_MUTEX(ntfs_lock);
-static struct super_block *ntfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ntfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ntfs_fill_super,
+ mnt);
}
static struct file_system_type ntfs_fs_type = {
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 7e88e24b3471..7273d9fa6bab 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -574,10 +574,10 @@ static struct inode_operations dlmfs_file_inode_operations = {
.getattr = simple_getattr,
};
-static struct super_block *dlmfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int dlmfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super, mnt);
}
static struct file_system_type dlmfs_fs_type = {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 949b3dac30f1..cdf73393f094 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -100,7 +100,7 @@ static int ocfs2_initialize_mem_caches(void);
static void ocfs2_free_mem_caches(void);
static void ocfs2_delete_osb(struct ocfs2_super *osb);
-static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf);
+static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ocfs2_sync_fs(struct super_block *sb, int wait);
@@ -672,12 +672,14 @@ read_super_error:
return status;
}
-static struct super_block *ocfs2_get_sb(struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *data)
+static int ocfs2_get_sb(struct file_system_type *fs_type,
+ int flags,
+ const char *dev_name,
+ void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super,
+ mnt);
}
static struct file_system_type ocfs2_fs_type = {
@@ -855,7 +857,7 @@ static void ocfs2_put_super(struct super_block *sb)
mlog_exit_void();
}
-static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf)
+static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct ocfs2_super *osb;
u32 numbits, freebits;
@@ -864,9 +866,9 @@ static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf)
struct buffer_head *bh = NULL;
struct inode *inode = NULL;
- mlog_entry("(%p, %p)\n", sb, buf);
+ mlog_entry("(%p, %p)\n", dentry->d_sb, buf);
- osb = OCFS2_SB(sb);
+ osb = OCFS2_SB(dentry->d_sb);
inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
@@ -889,7 +891,7 @@ static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf)
freebits = numbits - le32_to_cpu(bm_lock->id1.bitmap1.i_used);
buf->f_type = OCFS2_SUPER_MAGIC;
- buf->f_bsize = sb->s_blocksize;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_namelen = OCFS2_MAX_FILENAME_LEN;
buf->f_blocks = ((sector_t) numbits) *
(osb->s_clustersize >> osb->sb->s_blocksize_bits);
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index f6986bd79e75..0c8a1294ec96 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -64,8 +64,7 @@ static char *ocfs2_page_getlink(struct dentry * dentry,
{
struct page * page;
struct address_space *mapping = dentry->d_inode->i_mapping;
- page = read_cache_page(mapping, 0,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto sync_fail;
wait_on_page_locked(page);
diff --git a/fs/open.c b/fs/open.c
index 4f178acd4c09..5fb16e5267dc 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -31,18 +31,18 @@
#include <asm/unistd.h>
-int vfs_statfs(struct super_block *sb, struct kstatfs *buf)
+int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int retval = -ENODEV;
- if (sb) {
+ if (dentry) {
retval = -ENOSYS;
- if (sb->s_op->statfs) {
+ if (dentry->d_sb->s_op->statfs) {
memset(buf, 0, sizeof(*buf));
- retval = security_sb_statfs(sb);
+ retval = security_sb_statfs(dentry);
if (retval)
return retval;
- retval = sb->s_op->statfs(sb, buf);
+ retval = dentry->d_sb->s_op->statfs(dentry, buf);
if (retval == 0 && buf->f_frsize == 0)
buf->f_frsize = buf->f_bsize;
}
@@ -52,12 +52,12 @@ int vfs_statfs(struct super_block *sb, struct kstatfs *buf)
EXPORT_SYMBOL(vfs_statfs);
-static int vfs_statfs_native(struct super_block *sb, struct statfs *buf)
+static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(sb, &st);
+ retval = vfs_statfs(dentry, &st);
if (retval)
return retval;
@@ -95,12 +95,12 @@ static int vfs_statfs_native(struct super_block *sb, struct statfs *buf)
return 0;
}
-static int vfs_statfs64(struct super_block *sb, struct statfs64 *buf)
+static int vfs_statfs64(struct dentry *dentry, struct statfs64 *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(sb, &st);
+ retval = vfs_statfs(dentry, &st);
if (retval)
return retval;
@@ -130,7 +130,7 @@ asmlinkage long sys_statfs(const char __user * path, struct statfs __user * buf)
error = user_path_walk(path, &nd);
if (!error) {
struct statfs tmp;
- error = vfs_statfs_native(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_native(nd.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_release(&nd);
@@ -149,7 +149,7 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz, struct statfs64
error = user_path_walk(path, &nd);
if (!error) {
struct statfs64 tmp;
- error = vfs_statfs64(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs64(nd.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_release(&nd);
@@ -168,7 +168,7 @@ asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user * buf)
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs_native(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_native(file->f_dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
@@ -189,7 +189,7 @@ asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, struct statfs64 __user
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs64(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs64(file->f_dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
@@ -1152,7 +1152,7 @@ int filp_close(struct file *filp, fl_owner_t id)
}
if (filp->f_op && filp->f_op->flush)
- retval = filp->f_op->flush(filp);
+ retval = filp->f_op->flush(filp, id);
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
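
Illustration only: with vfs_statfs() keyed on a dentry, a filesystem's ->statfs() method sees the object being queried as well as its superblock. A hypothetical implementation following the pattern used throughout this patch:

static int example_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;

	buf->f_type    = sb->s_magic;
	buf->f_bsize   = sb->s_blocksize;
	buf->f_namelen = NAME_MAX;
	return 0;
}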
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 0f14276a2e51..464e2bce0203 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -1054,10 +1054,10 @@ out_no_root:
return -ENOMEM;
}
-static struct super_block *openprom_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int openprom_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, openprom_fill_super);
+ return get_sb_single(fs_type, flags, data, openprom_fill_super, mnt);
}
static struct file_system_type openprom_fs_type = {
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 8851b81e7c5a..2ef313a96b66 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -484,6 +484,10 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
sector_t from = state->parts[p].from;
if (!size)
continue;
+ if (from + size > get_capacity(disk)) {
+ printk(" %s: p%d exceeds device capacity\n",
+ disk->disk_name, p);
+ }
add_partition(disk, p, from, size);
#ifdef CONFIG_BLK_DEV_MD
if (state->parts[p].flags)
@@ -499,8 +503,8 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
struct address_space *mapping = bdev->bd_inode->i_mapping;
struct page *page;
- page = read_cache_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+ NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
if (!PageUptodate(page))
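
For reference, and assumed rather than shown here: get_capacity() reports the gendisk size in 512-byte sectors, so the new check flags partition entries that extend past the end of the device, roughly:

static inline sector_t get_capacity(struct gendisk *disk)
{
	return disk->capacity;		/* whole-device size in sectors */
}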
diff --git a/fs/pipe.c b/fs/pipe.c
index 5acd8954aaa0..20352573e025 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -979,12 +979,11 @@ no_files:
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
-
-static struct super_block *
-pipefs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int pipefs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
+ return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC, mnt);
}
static struct file_system_type pipe_fs_type = {
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c3fd3611112f..9995356ce73e 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -26,10 +26,10 @@ struct proc_dir_entry *proc_net, *proc_net_stat, *proc_bus, *proc_root_fs, *proc
struct proc_dir_entry *proc_sys_root;
#endif
-static struct super_block *proc_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int proc_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, proc_fill_super);
+ return get_sb_single(fs_type, flags, data, proc_fill_super, mnt);
}
static struct file_system_type proc_fs_type = {
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 2ecd46f85e9f..2f24c46f72a1 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -128,7 +128,7 @@ static struct inode *qnx4_alloc_inode(struct super_block *sb);
static void qnx4_destroy_inode(struct inode *inode);
static void qnx4_read_inode(struct inode *);
static int qnx4_remount(struct super_block *sb, int *flags, char *data);
-static int qnx4_statfs(struct super_block *, struct kstatfs *);
+static int qnx4_statfs(struct dentry *, struct kstatfs *);
static struct super_operations qnx4_sops =
{
@@ -282,8 +282,10 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
return block;
}
-static int qnx4_statfs(struct super_block *sb, struct kstatfs *buf)
+static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
lock_kernel();
buf->f_type = sb->s_magic;
@@ -561,10 +563,11 @@ static void destroy_inodecache(void)
"qnx4_inode_cache: not all structures were freed\n");
}
-static struct super_block *qnx4_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int qnx4_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, qnx4_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, qnx4_fill_super,
+ mnt);
}
static struct file_system_type qnx4_fs_type = {
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 14bd2246fb6d..b9677335cc8d 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -185,16 +185,17 @@ static int ramfs_fill_super(struct super_block * sb, void * data, int silent)
return 0;
}
-struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+int ramfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, ramfs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, ramfs_fill_super, mnt);
}
-static struct super_block *rootfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int rootfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super);
+ return get_sb_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super,
+ mnt);
}
static struct file_system_type ramfs_fs_type = {
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index cae2abbc0c71..00f1321e9209 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -60,7 +60,7 @@ static int is_any_reiserfs_magic_string(struct reiserfs_super_block *rs)
}
static int reiserfs_remount(struct super_block *s, int *flags, char *data);
-static int reiserfs_statfs(struct super_block *s, struct kstatfs *buf);
+static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int reiserfs_sync_fs(struct super_block *s, int wait)
{
@@ -1938,15 +1938,15 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
return errval;
}
-static int reiserfs_statfs(struct super_block *s, struct kstatfs *buf)
+static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
+ struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(dentry->d_sb);
buf->f_namelen = (REISERFS_MAX_NAME(s->s_blocksize));
buf->f_bfree = sb_free_blocks(rs);
buf->f_bavail = buf->f_bfree;
buf->f_blocks = sb_block_count(rs) - sb_bmap_nr(rs) - 1;
- buf->f_bsize = s->s_blocksize;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
/* changed to accommodate gcc folks. */
buf->f_type = REISERFS_SUPER_MAGIC;
return 0;
@@ -2249,11 +2249,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
#endif
-static struct super_block *get_super_block(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int get_super_block(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, reiserfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, reiserfs_fill_super,
+ mnt);
}
static int __init init_reiserfs_fs(void)
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ffb79c48c5bf..39fedaa88a0c 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -452,8 +452,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
/* We can deadlock if we try to free dentries,
and an unlink/rmdir has just occured - GFP_NOFS avoids this */
mapping_set_gfp_mask(mapping, GFP_NOFS);
- page = read_cache_page(mapping, n,
- (filler_t *) mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 9b9eda7b335c..283fbc6b8eea 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -179,12 +179,12 @@ outnobh:
/* That's simple too. */
static int
-romfs_statfs(struct super_block *sb, struct kstatfs *buf)
+romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
buf->f_type = ROMFS_MAGIC;
buf->f_bsize = ROMBSIZE;
buf->f_bfree = buf->f_bavail = buf->f_ffree;
- buf->f_blocks = (romfs_maxsize(sb)+ROMBSIZE-1)>>ROMBSBITS;
+ buf->f_blocks = (romfs_maxsize(dentry->d_sb)+ROMBSIZE-1)>>ROMBSBITS;
buf->f_namelen = ROMFS_MAXFN;
return 0;
}
@@ -607,10 +607,11 @@ static struct super_operations romfs_ops = {
.remount_fs = romfs_remount,
};
-static struct super_block *romfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int romfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, romfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, romfs_fill_super,
+ mnt);
}
static struct file_system_type romfs_fs_type = {
diff --git a/fs/select.c b/fs/select.c
index a8109baa5e46..9c4f0f2604f1 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -546,37 +546,38 @@ struct poll_list {
#define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
-static void do_pollfd(unsigned int num, struct pollfd * fdpage,
- poll_table ** pwait, int *count)
+/*
+ * Fish for pollable events on the pollfd->fd file descriptor. We're only
+ * interested in events matching the pollfd->events mask, and the result
+ * matching that mask is both recorded in pollfd->revents and returned. The
+ * pwait poll_table will be used by the fd-provided poll handler for waiting,
+ * if non-NULL.
+ */
+static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
- int i;
-
- for (i = 0; i < num; i++) {
- int fd;
- unsigned int mask;
- struct pollfd *fdp;
-
- mask = 0;
- fdp = fdpage+i;
- fd = fdp->fd;
- if (fd >= 0) {
- int fput_needed;
- struct file * file = fget_light(fd, &fput_needed);
- mask = POLLNVAL;
- if (file != NULL) {
- mask = DEFAULT_POLLMASK;
- if (file->f_op && file->f_op->poll)
- mask = file->f_op->poll(file, *pwait);
- mask &= fdp->events | POLLERR | POLLHUP;
- fput_light(file, fput_needed);
- }
- if (mask) {
- *pwait = NULL;
- (*count)++;
- }
+ unsigned int mask;
+ int fd;
+
+ mask = 0;
+ fd = pollfd->fd;
+ if (fd >= 0) {
+ int fput_needed;
+ struct file * file;
+
+ file = fget_light(fd, &fput_needed);
+ mask = POLLNVAL;
+ if (file != NULL) {
+ mask = DEFAULT_POLLMASK;
+ if (file->f_op && file->f_op->poll)
+ mask = file->f_op->poll(file, pwait);
+ /* Mask out unneeded events. */
+ mask &= pollfd->events | POLLERR | POLLHUP;
+ fput_light(file, fput_needed);
}
- fdp->revents = mask;
}
+ pollfd->revents = mask;
+
+ return mask;
}
static int do_poll(unsigned int nfds, struct poll_list *list,
@@ -594,11 +595,29 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
long __timeout;
set_current_state(TASK_INTERRUPTIBLE);
- walk = list;
- while(walk != NULL) {
- do_pollfd( walk->len, walk->entries, &pt, &count);
- walk = walk->next;
+ for (walk = list; walk != NULL; walk = walk->next) {
+ struct pollfd * pfd, * pfd_end;
+
+ pfd = walk->entries;
+ pfd_end = pfd + walk->len;
+ for (; pfd != pfd_end; pfd++) {
+ /*
+ * Fish for events. If we found one, record it
+ * and kill the poll_table, so we don't
+ * needlessly register any other waiters after
+ * this. They'll get immediately deregistered
+ * when we break out and return.
+ */
+ if (do_pollfd(pfd, pt)) {
+ count++;
+ pt = NULL;
+ }
+ }
}
+ /*
+ * All waiters have already been registered, so don't provide
+ * a poll_table to them on the next loop iteration.
+ */
pt = NULL;
if (count || !*timeout || signal_pending(current))
break;
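
A short usage sketch, not from the patch: do_pollfd() now handles exactly one descriptor, fills pollfd->revents, and returns the masked result, leaving the caller to decide when to stop registering waiters:

static int poll_one(struct pollfd *pfd, poll_table **pwait)
{
	if (do_pollfd(pfd, *pwait)) {
		/* an interesting event fired; stop registering waiters */
		*pwait = NULL;
		return 1;
	}
	return 0;
}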
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index fdeabc0a34f7..506ff87c1d4b 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -48,7 +48,7 @@
static void smb_delete_inode(struct inode *);
static void smb_put_super(struct super_block *);
-static int smb_statfs(struct super_block *, struct kstatfs *);
+static int smb_statfs(struct dentry *, struct kstatfs *);
static int smb_show_options(struct seq_file *, struct vfsmount *);
static kmem_cache_t *smb_inode_cachep;
@@ -641,13 +641,13 @@ out_no_server:
}
static int
-smb_statfs(struct super_block *sb, struct kstatfs *buf)
+smb_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int result;
lock_kernel();
- result = smb_proc_dskattr(sb, buf);
+ result = smb_proc_dskattr(dentry, buf);
unlock_kernel();
@@ -782,10 +782,10 @@ out:
return error;
}
-static struct super_block *smb_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int smb_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, smb_fill_super);
+ return get_sb_nodev(fs_type, flags, data, smb_fill_super, mnt);
}
static struct file_system_type smb_fs_type = {
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index b1b878b81730..c3495059889d 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -3226,9 +3226,9 @@ smb_proc_settime(struct dentry *dentry, struct smb_fattr *fattr)
}
int
-smb_proc_dskattr(struct super_block *sb, struct kstatfs *attr)
+smb_proc_dskattr(struct dentry *dentry, struct kstatfs *attr)
{
- struct smb_sb_info *server = SMB_SB(sb);
+ struct smb_sb_info *server = SMB_SB(dentry->d_sb);
int result;
char *p;
long unit;
diff --git a/fs/smbfs/proto.h b/fs/smbfs/proto.h
index 47664597e6b1..972ed7dad388 100644
--- a/fs/smbfs/proto.h
+++ b/fs/smbfs/proto.h
@@ -29,7 +29,7 @@ extern int smb_proc_getattr(struct dentry *dir, struct smb_fattr *fattr);
extern int smb_proc_setattr(struct dentry *dir, struct smb_fattr *fattr);
extern int smb_proc_setattr_unix(struct dentry *d, struct iattr *attr, unsigned int major, unsigned int minor);
extern int smb_proc_settime(struct dentry *dentry, struct smb_fattr *fattr);
-extern int smb_proc_dskattr(struct super_block *sb, struct kstatfs *attr);
+extern int smb_proc_dskattr(struct dentry *dentry, struct kstatfs *attr);
extern int smb_proc_read_link(struct smb_sb_info *server, struct dentry *d, char *buffer, int len);
extern int smb_proc_symlink(struct smb_sb_info *server, struct dentry *d, const char *oldpath);
extern int smb_proc_link(struct smb_sb_info *server, struct dentry *dentry, struct dentry *new_dentry);
diff --git a/fs/splice.c b/fs/splice.c
index a285fd746dc0..05fd2787be98 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -55,31 +55,43 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping;
lock_page(page);
- WARN_ON(!PageUptodate(page));
+ mapping = page_mapping(page);
+ if (mapping) {
+ WARN_ON(!PageUptodate(page));
- /*
- * At least for ext2 with nobh option, we need to wait on writeback
- * completing on this page, since we'll remove it from the pagecache.
- * Otherwise truncate wont wait on the page, allowing the disk
- * blocks to be reused by someone else before we actually wrote our
- * data to them. fs corruption ensues.
- */
- wait_on_page_writeback(page);
+ /*
+ * At least for ext2 with nobh option, we need to wait on
+ * writeback completing on this page, since we'll remove it
+ * from the pagecache. Otherwise truncate wont wait on the
+ * page, allowing the disk blocks to be reused by someone else
+ * before we actually wrote our data to them. fs corruption
+ * ensues.
+ */
+ wait_on_page_writeback(page);
- if (PagePrivate(page))
- try_to_release_page(page, mapping_gfp_mask(mapping));
+ if (PagePrivate(page))
+ try_to_release_page(page, mapping_gfp_mask(mapping));
- if (!remove_mapping(mapping, page)) {
- unlock_page(page);
- return 1;
+ /*
+ * If we succeeded in removing the mapping, set LRU flag
+ * and return good.
+ */
+ if (remove_mapping(mapping, page)) {
+ buf->flags |= PIPE_BUF_FLAG_LRU;
+ return 0;
+ }
}
- buf->flags |= PIPE_BUF_FLAG_LRU;
- return 0;
+ /*
+ * Raced with truncate or failed to remove page from current
+ * address space, unlock and return failure.
+ */
+ unlock_page(page);
+ return 1;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
diff --git a/fs/super.c b/fs/super.c
index 9d5c2add7228..057b5325b7ef 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -231,7 +231,7 @@ void generic_shutdown_super(struct super_block *sb)
if (root) {
sb->s_root = NULL;
shrink_dcache_parent(root);
- shrink_dcache_anon(sb);
+ shrink_dcache_sb(sb);
dput(root);
fsync_super(sb);
lock_super(sb);
@@ -486,7 +486,7 @@ asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
s = user_get_super(new_decode_dev(dev));
if (s == NULL)
goto out;
- err = vfs_statfs(s, &sbuf);
+ err = vfs_statfs(s->s_root, &sbuf);
drop_super(s);
if (err)
goto out;
@@ -676,9 +676,10 @@ static void bdev_uevent(struct block_device *bdev, enum kobject_action action)
}
}
-struct super_block *get_sb_bdev(struct file_system_type *fs_type,
+int get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int))
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt)
{
struct block_device *bdev;
struct super_block *s;
@@ -686,7 +687,7 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
bdev = open_bdev_excl(dev_name, flags, fs_type);
if (IS_ERR(bdev))
- return (struct super_block *)bdev;
+ return PTR_ERR(bdev);
/*
* once the super is inserted into the list by sget, s_umount
@@ -697,15 +698,17 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
mutex_unlock(&bdev->bd_mount_mutex);
if (IS_ERR(s))
- goto out;
+ goto error_s;
if (s->s_root) {
if ((flags ^ s->s_flags) & MS_RDONLY) {
up_write(&s->s_umount);
deactivate_super(s);
- s = ERR_PTR(-EBUSY);
+ error = -EBUSY;
+ goto error_bdev;
}
- goto out;
+
+ close_bdev_excl(bdev);
} else {
char b[BDEVNAME_SIZE];
@@ -716,18 +719,21 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- s = ERR_PTR(error);
- } else {
- s->s_flags |= MS_ACTIVE;
- bdev_uevent(bdev, KOBJ_MOUNT);
+ goto error;
}
+
+ s->s_flags |= MS_ACTIVE;
+ bdev_uevent(bdev, KOBJ_MOUNT);
}
- return s;
+ return simple_set_mnt(mnt, s);
-out:
+error_s:
+ error = PTR_ERR(s);
+error_bdev:
close_bdev_excl(bdev);
- return s;
+error:
+ return error;
}
EXPORT_SYMBOL(get_sb_bdev);
@@ -744,15 +750,16 @@ void kill_block_super(struct super_block *sb)
EXPORT_SYMBOL(kill_block_super);
-struct super_block *get_sb_nodev(struct file_system_type *fs_type,
+int get_sb_nodev(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int))
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt)
{
int error;
struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
if (IS_ERR(s))
- return s;
+ return PTR_ERR(s);
s->s_flags = flags;
@@ -760,10 +767,10 @@ struct super_block *get_sb_nodev(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
}
EXPORT_SYMBOL(get_sb_nodev);
@@ -773,94 +780,102 @@ static int compare_single(struct super_block *s, void *p)
return 1;
}
-struct super_block *get_sb_single(struct file_system_type *fs_type,
+int get_sb_single(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int))
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt)
{
struct super_block *s;
int error;
s = sget(fs_type, compare_single, set_anon_super, NULL);
if (IS_ERR(s))
- return s;
+ return PTR_ERR(s);
if (!s->s_root) {
s->s_flags = flags;
error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
}
do_remount_sb(s, flags, data, 0);
- return s;
+ return simple_set_mnt(mnt, s);
}
EXPORT_SYMBOL(get_sb_single);
struct vfsmount *
-do_kern_mount(const char *fstype, int flags, const char *name, void *data)
+vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
- struct file_system_type *type = get_fs_type(fstype);
- struct super_block *sb = ERR_PTR(-ENOMEM);
struct vfsmount *mnt;
- int error;
char *secdata = NULL;
+ int error;
if (!type)
return ERR_PTR(-ENODEV);
+ error = -ENOMEM;
mnt = alloc_vfsmnt(name);
if (!mnt)
goto out;
if (data) {
secdata = alloc_secdata();
- if (!secdata) {
- sb = ERR_PTR(-ENOMEM);
+ if (!secdata)
goto out_mnt;
- }
error = security_sb_copy_data(type, data, secdata);
- if (error) {
- sb = ERR_PTR(error);
+ if (error)
goto out_free_secdata;
- }
}
- sb = type->get_sb(type, flags, name, data);
- if (IS_ERR(sb))
+ error = type->get_sb(type, flags, name, data, mnt);
+ if (error < 0)
goto out_free_secdata;
- error = security_sb_kern_mount(sb, secdata);
+
+ error = security_sb_kern_mount(mnt->mnt_sb, secdata);
if (error)
goto out_sb;
- mnt->mnt_sb = sb;
- mnt->mnt_root = dget(sb->s_root);
- mnt->mnt_mountpoint = sb->s_root;
+
+ mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
- up_write(&sb->s_umount);
+ up_write(&mnt->mnt_sb->s_umount);
free_secdata(secdata);
- put_filesystem(type);
return mnt;
out_sb:
- up_write(&sb->s_umount);
- deactivate_super(sb);
- sb = ERR_PTR(error);
+ dput(mnt->mnt_root);
+ up_write(&mnt->mnt_sb->s_umount);
+ deactivate_super(mnt->mnt_sb);
out_free_secdata:
free_secdata(secdata);
out_mnt:
free_vfsmnt(mnt);
out:
+ return ERR_PTR(error);
+}
+
+EXPORT_SYMBOL_GPL(vfs_kern_mount);
+
+struct vfsmount *
+do_kern_mount(const char *fstype, int flags, const char *name, void *data)
+{
+ struct file_system_type *type = get_fs_type(fstype);
+ struct vfsmount *mnt;
+ if (!type)
+ return ERR_PTR(-ENODEV);
+ mnt = vfs_kern_mount(type, flags, name, data);
put_filesystem(type);
- return (struct vfsmount *)sb;
+ return mnt;
}
EXPORT_SYMBOL_GPL(do_kern_mount);
struct vfsmount *kern_mount(struct file_system_type *type)
{
- return do_kern_mount(type->name, 0, type->name, NULL);
+ return vfs_kern_mount(type, 0, type->name, NULL);
}
EXPORT_SYMBOL(kern_mount);
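
Illustration only: after the split, callers that already hold a file_system_type can mount through vfs_kern_mount() directly, while do_kern_mount() keeps the name-based lookup. A hypothetical internal helper:

static struct vfsmount *example_mount(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt = vfs_kern_mount(type, 0, type->name, data);

	if (IS_ERR(mnt))
		printk(KERN_ERR "mount of %s failed: %ld\n",
		       type->name, PTR_ERR(mnt));
	return mnt;
}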
diff --git a/fs/sync.c b/fs/sync.c
index aab5ffe77e9f..955aef04da28 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -100,7 +100,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
}
if (nbytes == 0)
- endbyte = -1;
+ endbyte = LLONG_MAX;
else
endbyte--; /* inclusive */
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index f1117e885bd6..40190c489271 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -66,10 +66,10 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *sysfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sysfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, sysfs_fill_super);
+ return get_sb_single(fs_type, flags, data, sysfs_fill_super, mnt);
}
static struct file_system_type sysfs_fs_type = {
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index d7074341ee87..f2bef962d309 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -53,8 +53,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3ff89cc5833a..58b2d22142ba 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -85,8 +85,9 @@ static void sysv_put_super(struct super_block *sb)
kfree(sbi);
}
-static int sysv_statfs(struct super_block *sb, struct kstatfs *buf)
+static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct sysv_sb_info *sbi = SYSV_SB(sb);
buf->f_type = sb->s_magic;
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index e92b991e6dda..876639b93321 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -506,16 +506,17 @@ failed:
/* Every kernel module contains stuff like this. */
-static struct super_block *sysv_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sysv_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, sysv_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, sysv_fill_super,
+ mnt);
}
-static struct super_block *v7_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int v7_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, v7_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, v7_fill_super, mnt);
}
static struct file_system_type sysv_fs_type = {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e45789fe38e8..44fe2cb0bbb2 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -91,13 +91,13 @@ static void udf_load_partdesc(struct super_block *, struct buffer_head *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
-static int udf_statfs(struct super_block *, struct kstatfs *);
+static int udf_statfs(struct dentry *, struct kstatfs *);
/* UDF filesystem type */
-static struct super_block *udf_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int udf_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super, mnt);
}
static struct file_system_type udf_fstype = {
@@ -1779,8 +1779,10 @@ udf_put_super(struct super_block *sb)
* Written, tested, and released.
*/
static int
-udf_statfs(struct super_block *sb, struct kstatfs *buf)
+udf_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = UDF_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = UDF_SB_PARTLEN(sb, UDF_SB_PARTITION(sb));
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index db98a4c71e63..fe5ab2aa2899 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1113,8 +1113,9 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
return 0;
}
-static int ufs_statfs (struct super_block *sb, struct kstatfs *buf)
+static int ufs_statfs (struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct ufs_sb_private_info * uspi;
struct ufs_super_block_first * usb1;
struct ufs_super_block * usb;
@@ -1311,10 +1312,10 @@ out:
#endif
-static struct super_block *ufs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ufs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ufs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ufs_fill_super, mnt);
}
static struct file_system_type ufs_fs_type = {
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index a56cec3be5f0..9a8f48bae956 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -1023,11 +1023,12 @@ static int vfat_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *vfat_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int vfat_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, vfat_fill_super,
+ mnt);
}
static struct file_system_type vfat_fs_type = {
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 1f0589a05eca..e480b6102051 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -62,7 +62,7 @@ xfs_read_xfsstats(
while (j < xstats[i].endpoint) {
val = 0;
/* sum over all cpus */
- for_each_cpu(c)
+ for_each_possible_cpu(c)
val += *(((__u32*)&per_cpu(xfsstats, c) + j));
len += sprintf(buffer + len, " %u", val);
j++;
@@ -70,7 +70,7 @@ xfs_read_xfsstats(
buffer[len++] = '\n';
}
/* extra precision counters */
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
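
Sketch only: the for_each_cpu() to for_each_possible_cpu() rename spells out that these loops walk every CPU that may ever come online, not just those currently present; the summation pattern used above is:

static u64 example_sum_write_bytes(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu(xfsstats, cpu).xs_write_bytes;

	return total;
}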
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f2a0778536f4..9bdef9d51900 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -684,10 +684,11 @@ xfs_fs_sync_super(
STATIC int
xfs_fs_statfs(
- struct super_block *sb,
+ struct dentry *dentry,
struct kstatfs *statp)
{
- return -bhv_vfs_statvfs(vfs_from_sb(sb), statp, NULL);
+ return -bhv_vfs_statvfs(vfs_from_sb(dentry->d_sb), statp,
+ vn_from_inode(dentry->d_inode));
}
STATIC int
@@ -853,14 +854,16 @@ fail_vfsop:
return -error;
}
-STATIC struct super_block *
+STATIC int
xfs_fs_get_sb(
struct file_system_type *fs_type,
int flags,
const char *dev_name,
- void *data)
+ void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
+ mnt);
}
STATIC struct super_operations xfs_super_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index 4af97682bec8..af246532fbfb 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -38,7 +38,7 @@ xfs_stats_clear_proc_handler(
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
- for_each_cpu(c) {
+ for_each_possible_cpu(c) {
preempt_disable();
/* save vn_active, it's a universal truth! */
vn_active = per_cpu(xfsstats, c).vn_active;
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index e27dc8f29972..b9beceb33141 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -63,7 +63,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20060127
+#define ACPI_CA_VERSION 0x20060608
/*
* OS name, used for the _OS object. The _OS object is essentially obsolete,
@@ -81,6 +81,7 @@
#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */
+#define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */
/*
* Should the subsystem abort the loading of an ACPI table if the
@@ -102,9 +103,9 @@
#define ACPI_MAX_SEMAPHORE_COUNT 256
-/* Max reference count (for debug only) */
+/* Maximum object reference count (detects object deletion issues) */
-#define ACPI_MAX_REFERENCE_COUNT 0x400
+#define ACPI_MAX_REFERENCE_COUNT 0x800
/* Size of cached memory mapping for system memory operation region */
@@ -171,12 +172,7 @@
/* Array sizes. Used for range checking also */
-#define ACPI_NUM_ACCESS_TYPES 6
-#define ACPI_NUM_UPDATE_RULES 3
-#define ACPI_NUM_LOCK_RULES 2
-#define ACPI_NUM_MATCH_OPS 6
-#define ACPI_NUM_OPCODES 256
-#define ACPI_NUM_FIELD_NAMES 2
+#define ACPI_MAX_MATCH_OPCODE 5
/* RSDP checksums */
@@ -187,10 +183,6 @@
#define ACPI_SMBUS_BUFFER_SIZE 34
-/* Number of strings associated with the _OSI reserved method */
-
-#define ACPI_NUM_OSI_STRINGS 10
-
/******************************************************************************
*
* ACPI AML Debugger
diff --git a/include/acpi/acdisasm.h b/include/acpi/acdisasm.h
index 11a8fe39cb04..9a7d6921f534 100644
--- a/include/acpi/acdisasm.h
+++ b/include/acpi/acdisasm.h
@@ -50,26 +50,72 @@
#define BLOCK_PAREN 1
#define BLOCK_BRACE 2
#define BLOCK_COMMA_LIST 4
+#define ACPI_DEFAULT_RESNAME *(u32 *) "__RD"
struct acpi_external_list {
char *path;
+ char *internal_path;
struct acpi_external_list *next;
+ u32 value;
+ u16 length;
+ u8 type;
};
extern struct acpi_external_list *acpi_gbl_external_list;
-/* Strings used for decoding flags to ASL keywords */
+typedef const struct acpi_dmtable_info {
+ u8 opcode;
+ u8 offset;
+ char *name;
+
+} acpi_dmtable_info;
+
+/*
+ * Values for Opcode above.
+ * Note: 0-7 must not change, used as a flag shift value
+ */
+#define ACPI_DMT_FLAG0 0
+#define ACPI_DMT_FLAG1 1
+#define ACPI_DMT_FLAG2 2
+#define ACPI_DMT_FLAG3 3
+#define ACPI_DMT_FLAG4 4
+#define ACPI_DMT_FLAG5 5
+#define ACPI_DMT_FLAG6 6
+#define ACPI_DMT_FLAG7 7
+#define ACPI_DMT_FLAGS0 8
+#define ACPI_DMT_FLAGS2 9
+#define ACPI_DMT_UINT8 10
+#define ACPI_DMT_UINT16 11
+#define ACPI_DMT_UINT24 12
+#define ACPI_DMT_UINT32 13
+#define ACPI_DMT_UINT56 14
+#define ACPI_DMT_UINT64 15
+#define ACPI_DMT_STRING 16
+#define ACPI_DMT_NAME4 17
+#define ACPI_DMT_NAME6 18
+#define ACPI_DMT_NAME8 19
+#define ACPI_DMT_CHKSUM 20
+#define ACPI_DMT_SPACEID 21
+#define ACPI_DMT_GAS 22
+#define ACPI_DMT_MADT 23
+#define ACPI_DMT_SRAT 24
+#define ACPI_DMT_EXIT 25
-extern const char *acpi_gbl_word_decode[4];
-extern const char *acpi_gbl_irq_decode[2];
-extern const char *acpi_gbl_lock_rule[ACPI_NUM_LOCK_RULES];
-extern const char *acpi_gbl_access_types[ACPI_NUM_ACCESS_TYPES];
-extern const char *acpi_gbl_update_rules[ACPI_NUM_UPDATE_RULES];
-extern const char *acpi_gbl_match_ops[ACPI_NUM_MATCH_OPS];
+typedef
+void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table);
+
+struct acpi_dmtable_data {
+ char *signature;
+ struct acpi_dmtable_info *table_info;
+ ACPI_TABLE_HANDLER table_handler;
+};
struct acpi_op_walk_info {
u32 level;
+ u32 last_level;
+ u32 count;
u32 bit_offset;
+ u32 flags;
struct acpi_walk_state *walk_state;
};
@@ -77,6 +123,100 @@ typedef
acpi_status(*asl_walk_callback) (union acpi_parse_object * op,
u32 level, void *context);
+struct acpi_resource_tag {
+ u32 bit_index;
+ char *tag;
+};
+
+/* Strings used for decoding flags to ASL keywords */
+
+extern const char *acpi_gbl_word_decode[];
+extern const char *acpi_gbl_irq_decode[];
+extern const char *acpi_gbl_lock_rule[];
+extern const char *acpi_gbl_access_types[];
+extern const char *acpi_gbl_update_rules[];
+extern const char *acpi_gbl_match_ops[];
+
+extern struct acpi_dmtable_info acpi_dm_table_info_asf0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
+extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
+extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
+extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
+extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
+extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_fadt2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_gas[];
+extern struct acpi_dmtable_info acpi_dm_table_info_header[];
+extern struct acpi_dmtable_info acpi_dm_table_info_hpet[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt3[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt4[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt5[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt6[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt7[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt8[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt_hdr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_mcfg[];
+extern struct acpi_dmtable_info acpi_dm_table_info_mcfg0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_rsdp1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_rsdp2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_sbst[];
+extern struct acpi_dmtable_info acpi_dm_table_info_slit[];
+extern struct acpi_dmtable_info acpi_dm_table_info_spcr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_spmi[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_tcpa[];
+extern struct acpi_dmtable_info acpi_dm_table_info_wdrt[];
+
+/*
+ * dmtable
+ */
+void acpi_dm_dump_data_table(struct acpi_table_header *table);
+
+void
+acpi_dm_dump_table(u32 table_length,
+ u32 table_offset,
+ void *table,
+ u32 sub_table_length, struct acpi_dmtable_info *info);
+
+void acpi_dm_line_header(u32 offset, u32 byte_length, char *name);
+
+void acpi_dm_line_header2(u32 offset, u32 byte_length, char *name, u32 value);
+
+/*
+ * dmtbdump
+ */
+void acpi_dm_dump_asf(struct acpi_table_header *table);
+
+void acpi_dm_dump_cpep(struct acpi_table_header *table);
+
+void acpi_dm_dump_fadt(struct acpi_table_header *table);
+
+void acpi_dm_dump_srat(struct acpi_table_header *table);
+
+void acpi_dm_dump_mcfg(struct acpi_table_header *table);
+
+void acpi_dm_dump_madt(struct acpi_table_header *table);
+
+u32 acpi_dm_dump_rsdp(struct acpi_table_header *table);
+
+void acpi_dm_dump_rsdt(struct acpi_table_header *table);
+
+void acpi_dm_dump_slit(struct acpi_table_header *table);
+
+void acpi_dm_dump_xsdt(struct acpi_table_header *table);
+
/*
* dmwalk
*/
@@ -84,6 +224,11 @@ void
acpi_dm_disassemble(struct acpi_walk_state *walk_state,
union acpi_parse_object *origin, u32 num_opcodes);
+void
+acpi_dm_walk_parse_tree(union acpi_parse_object *op,
+ asl_walk_callback descending_callback,
+ asl_walk_callback ascending_callback, void *context);
+
/*
* dmopcode
*/
@@ -166,6 +311,7 @@ void acpi_dm_dump_integer64(u64 value, char *name);
void
acpi_dm_resource_template(struct acpi_op_walk_info *info,
+ union acpi_parse_object *op,
u8 * byte_data, u32 byte_count);
u8 acpi_dm_is_resource_template(union acpi_parse_object *op);
@@ -176,6 +322,8 @@ void acpi_dm_bit_list(u16 mask);
void acpi_dm_decode_attribute(u8 attribute);
+void acpi_dm_descriptor_name(void);
+
/*
* dmresrcl
*/
@@ -248,6 +396,15 @@ acpi_dm_vendor_small_descriptor(union aml_resource *resource,
/*
* dmutils
*/
-void acpi_dm_add_to_external_list(char *path);
+void acpi_dm_add_to_external_list(char *path, u8 type, u32 value);
+
+/*
+ * dmrestag
+ */
+void acpi_dm_find_resources(union acpi_parse_object *root);
+
+void
+acpi_dm_check_resource_reference(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
#endif /* __ACDISASM_H__ */
diff --git a/include/acpi/acdispat.h b/include/acpi/acdispat.h
index c41a926ff317..288f84903af7 100644
--- a/include/acpi/acdispat.h
+++ b/include/acpi/acdispat.h
@@ -194,7 +194,9 @@ acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
union acpi_operand_object *return_desc);
-void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state);
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+ struct acpi_walk_state *walk_state);
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
@@ -302,7 +304,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *method_node,
u8 * aml_start,
u32 aml_length,
- struct acpi_parameter_info *info, u8 pass_number);
+ struct acpi_evaluate_info *info, u8 pass_number);
acpi_status
acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
diff --git a/include/acpi/acevents.h b/include/acpi/acevents.h
index f2717be4fe0d..234142828e1a 100644
--- a/include/acpi/acevents.h
+++ b/include/acpi/acevents.h
@@ -93,7 +93,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
*/
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback);
+acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback);
acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
@@ -138,7 +138,7 @@ acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 function,
acpi_physical_address address,
- u32 bit_width, void *value);
+ u32 bit_width, acpi_integer * value);
acpi_status
acpi_ev_attach_region(union acpi_operand_object *handler_obj,
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index dc768aa580e4..797ca1ea5214 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -160,8 +160,9 @@
#define AE_AML_BAD_RESOURCE_VALUE (acpi_status) (0x001F | AE_CODE_AML)
#define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x0020 | AE_CODE_AML)
#define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x0021 | AE_CODE_AML)
+#define AE_AML_ILLEGAL_ADDRESS (acpi_status) (0x0022 | AE_CODE_AML)
-#define AE_CODE_AML_MAX 0x0021
+#define AE_CODE_AML_MAX 0x0022
/*
* Internal exceptions used for control
@@ -275,7 +276,8 @@ char const *acpi_gbl_exception_names_aml[] = {
"AE_AML_NO_RESOURCE_END_TAG",
"AE_AML_BAD_RESOURCE_VALUE",
"AE_AML_CIRCULAR_REFERENCE",
- "AE_AML_BAD_RESOURCE_LENGTH"
+ "AE_AML_BAD_RESOURCE_LENGTH",
+ "AE_AML_ILLEGAL_ADDRESS"
};
char const *acpi_gbl_exception_names_ctrl[] = {
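Editorial note: the acexcep.h hunk above adds AE_AML_ILLEGAL_ADDRESS (0x0022), bumps AE_CODE_AML_MAX, and appends the matching string to acpi_gbl_exception_names_aml[], so the code value and the name-table index must stay in lockstep. The sketch below shows that lookup in isolation; AE_CODE_AML and AE_CODE_MASK values are assumed from the rest of acexcep.h (not shown in this hunk), and the helper is an illustration, not ACPICA's actual exception formatting code.

/*
 * Editorial sketch: map an AML-class acpi_status to its name string.
 * The names table is indexed by (sub-status - 1).
 */
#include <stdio.h>

typedef unsigned int acpi_status;

#define AE_CODE_AML       0x3000      /* assumed class value */
#define AE_CODE_MASK      0xF000      /* assumed class mask */
#define AE_CODE_AML_MAX   0x0022      /* now includes AE_AML_ILLEGAL_ADDRESS */

static const char *aml_names[AE_CODE_AML_MAX] = {
	[0x0021 - 1] = "AE_AML_BAD_RESOURCE_LENGTH",
	[0x0022 - 1] = "AE_AML_ILLEGAL_ADDRESS",     /* new entry from this patch */
};

static const char *aml_exception_name(acpi_status status)
{
	acpi_status sub = status & ~AE_CODE_MASK;

	if ((status & AE_CODE_MASK) != AE_CODE_AML ||
	    sub == 0 || sub > AE_CODE_AML_MAX)
		return "AE_UNKNOWN_STATUS";

	return aml_names[sub - 1];
}

int main(void)
{
	printf("%s\n", aml_exception_name(0x3022));  /* AE_AML_ILLEGAL_ADDRESS */
	return 0;
}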
diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h
index 734cc77bf2c7..14531d48f6b6 100644
--- a/include/acpi/acglobal.h
+++ b/include/acpi/acglobal.h
@@ -107,6 +107,7 @@ ACPI_EXTERN u32 acpi_gbl_trace_flags;
* 3) Allow access to uninitialized locals/args (auto-init to integer 0)
* 4) Allow ANY object type to be a source operand for the Store() operator
* 5) Allow unresolved references (invalid target name) in package objects
+ * 6) Enable warning messages for behavior that is not ACPI spec compliant
*/
ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
@@ -114,7 +115,7 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
* Automatically serialize ALL control methods? Default is FALSE, meaning
* to use the Serialized/not_serialized method flags on a per method basis.
* Only change this if the ASL code is poorly written and cannot handle
- * reentrancy even though methods are marked "not_serialized".
+ * reentrancy even though methods are marked "NotSerialized".
*/
ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
@@ -149,10 +150,10 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
ACPI_EXTERN u32 acpi_gbl_table_flags;
ACPI_EXTERN u32 acpi_gbl_rsdt_table_count;
ACPI_EXTERN struct rsdp_descriptor *acpi_gbl_RSDP;
-ACPI_EXTERN XSDT_DESCRIPTOR *acpi_gbl_XSDT;
-ACPI_EXTERN FADT_DESCRIPTOR *acpi_gbl_FADT;
+ACPI_EXTERN struct xsdt_descriptor *acpi_gbl_XSDT;
+ACPI_EXTERN struct fadt_descriptor *acpi_gbl_FADT;
ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
-ACPI_EXTERN FACS_DESCRIPTOR *acpi_gbl_FACS;
+ACPI_EXTERN struct facs_descriptor *acpi_gbl_FACS;
ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS;
/*
* Since there may be multiple SSDTs and PSDTs, a single pointer is not
@@ -177,15 +178,15 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
/*
* ACPI Table info arrays
*/
-extern struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES];
-extern struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES];
+extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
+extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
/*
* Predefined mutex objects. This array contains the
* actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
* (The table maps local handles to the real OS handles)
*/
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[NUM_MUTEX];
+ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*****************************************************************************
*
@@ -203,6 +204,7 @@ ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
/* Object caches */
+ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
@@ -244,7 +246,6 @@ extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
extern const char *acpi_gbl_highest_dstate_names[4];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
-extern const char *acpi_gbl_valid_osi_strings[ACPI_NUM_OSI_STRINGS];
/*****************************************************************************
*
@@ -291,14 +292,6 @@ ACPI_EXTERN u8 acpi_gbl_cm_single_step;
/*****************************************************************************
*
- * Parser globals
- *
- ****************************************************************************/
-
-ACPI_EXTERN union acpi_parse_object *acpi_gbl_parsed_namespace_root;
-
-/*****************************************************************************
- *
* Hardware globals
*
****************************************************************************/
@@ -321,7 +314,11 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+
+/* Spinlocks */
+
ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock;
+ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock;
/*****************************************************************************
*
diff --git a/include/acpi/aclocal.h b/include/acpi/aclocal.h
index 8361820d2970..1eeca7adca95 100644
--- a/include/acpi/aclocal.h
+++ b/include/acpi/aclocal.h
@@ -44,7 +44,10 @@
#ifndef __ACLOCAL_H__
#define __ACLOCAL_H__
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */
+#define ACPI_INFINITE_CONCURRENCY 0xFF
typedef void *acpi_mutex;
typedef u32 acpi_mutex_handle;
@@ -69,52 +72,55 @@ union acpi_parse_object;
* Predefined handles for the mutex objects used within the subsystem
* All mutex objects are automatically created by acpi_ut_mutex_initialize.
*
- * The acquire/release ordering protocol is implied via this list. Mutexes
+ * The acquire/release ordering protocol is implied via this list. Mutexes
* with a lower value must be acquired before mutexes with a higher value.
*
- * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names table also!
+ * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names
+ * table below also!
*/
-#define ACPI_MTX_EXECUTE 0
-#define ACPI_MTX_INTERPRETER 1
-#define ACPI_MTX_PARSER 2
-#define ACPI_MTX_DISPATCHER 3
-#define ACPI_MTX_TABLES 4
-#define ACPI_MTX_OP_REGIONS 5
-#define ACPI_MTX_NAMESPACE 6
-#define ACPI_MTX_EVENTS 7
-#define ACPI_MTX_HARDWARE 8
-#define ACPI_MTX_CACHES 9
-#define ACPI_MTX_MEMORY 10
-#define ACPI_MTX_DEBUG_CMD_COMPLETE 11
-#define ACPI_MTX_DEBUG_CMD_READY 12
-
-#define MAX_MUTEX 12
-#define NUM_MUTEX MAX_MUTEX+1
+#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */
+#define ACPI_MTX_CONTROL_METHOD 1 /* Control method termination [TBD: may no longer be necessary] */
+#define ACPI_MTX_TABLES 2 /* Data for ACPI tables */
+#define ACPI_MTX_NAMESPACE 3 /* ACPI Namespace */
+#define ACPI_MTX_EVENTS 4 /* Data for ACPI events */
+#define ACPI_MTX_CACHES 5 /* Internal caches, general purposes */
+#define ACPI_MTX_MEMORY 6 /* Debug memory tracking lists */
+#define ACPI_MTX_DEBUG_CMD_COMPLETE 7 /* AML debugger */
+#define ACPI_MTX_DEBUG_CMD_READY 8 /* AML debugger */
+
+#define ACPI_MAX_MUTEX 8
+#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
#ifdef DEFINE_ACPI_GLOBALS
-/* Names for the mutexes used in the subsystem */
+/* Debug names for the mutexes above */
-static char *acpi_gbl_mutex_names[] = {
- "ACPI_MTX_Execute",
+static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
"ACPI_MTX_Interpreter",
- "ACPI_MTX_Parser",
- "ACPI_MTX_Dispatcher",
+ "ACPI_MTX_Method",
"ACPI_MTX_Tables",
- "ACPI_MTX_op_regions",
"ACPI_MTX_Namespace",
"ACPI_MTX_Events",
- "ACPI_MTX_Hardware",
"ACPI_MTX_Caches",
"ACPI_MTX_Memory",
- "ACPI_MTX_debug_cmd_complete",
- "ACPI_MTX_debug_cmd_ready",
+ "ACPI_MTX_DebugCmdComplete",
+ "ACPI_MTX_DebugCmdReady"
};
#endif
#endif
+/*
+ * Predefined handles for spinlocks used within the subsystem.
+ * These spinlocks are created by acpi_ut_mutex_initialize
+ */
+#define ACPI_LOCK_GPES 0
+#define ACPI_LOCK_HARDWARE 1
+
+#define ACPI_MAX_LOCK 1
+#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
+
/* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id;
@@ -129,7 +135,7 @@ typedef u8 acpi_owner_id;
struct acpi_mutex_info {
acpi_mutex mutex;
u32 use_count;
- u32 thread_id;
+ acpi_thread_id thread_id;
};
/* Lock flag parameter for various interfaces */
@@ -144,6 +150,8 @@ struct acpi_mutex_info {
#define ACPI_FIELD_DWORD_GRANULARITY 4
#define ACPI_FIELD_QWORD_GRANULARITY 8
+#define ACPI_ENTRY_NOT_FOUND NULL
+
/*****************************************************************************
*
* Namespace typedefs and structs
@@ -158,49 +166,55 @@ typedef enum {
ACPI_IMODE_EXECUTE = 0x0E
} acpi_interpreter_mode;
-/*
- * The Node describes a named object that appears in the AML
- * An acpi_node is used to store Nodes.
- *
- * data_type is used to differentiate between internal descriptors, and MUST
- * be the first byte in this structure.
- */
union acpi_name_union {
u32 integer;
char ascii[4];
};
+/*
+ * The Namespace Node describes a named object that appears in the AML.
+ * descriptor_type is used to differentiate between internal descriptors.
+ *
+ * The node is optimized for both 32-bit and 64-bit platforms:
+ * 20 bytes for the 32-bit case, 32 bytes for the 64-bit case.
+ *
+ * Note: The descriptor_type and Type fields must appear in the identical
+ * position in both the struct acpi_namespace_node and union acpi_operand_object
+ * structures.
+ */
struct acpi_namespace_node {
- u8 descriptor; /* Used to differentiate object descriptor types */
- u8 type; /* Type associated with this name */
- u16 reference_count; /* Current count of references and children */
+ union acpi_operand_object *object; /* Interpreter object */
+ u8 descriptor_type; /* Differentiate object descriptor types */
+ u8 type; /* ACPI Type associated with this name */
+ u8 flags; /* Miscellaneous flags */
+ acpi_owner_id owner_id; /* Node creator */
union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */
- union acpi_operand_object *object; /* Pointer to attached ACPI object (optional) */
struct acpi_namespace_node *child; /* First child */
- struct acpi_namespace_node *peer; /* Next peer */
- u8 owner_id; /* Who created this node */
- u8 flags;
-
- /* Fields used by the ASL compiler only */
+ struct acpi_namespace_node *peer; /* Peer. Parent if ANOBJ_END_OF_PEER_LIST set */
-#ifdef ACPI_ASL_COMPILER
- u32 value;
+ /*
+ * The following fields are used by the ASL compiler and disassembler only
+ */
+#ifdef ACPI_LARGE_NAMESPACE_NODE
union acpi_parse_object *op;
+ u32 value;
+ u32 length;
#endif
};
-#define ACPI_ENTRY_NOT_FOUND NULL
+/* Namespace Node flags */
-/* Node flags */
+#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */
+#define ANOBJ_DATA_WIDTH_32 0x02 /* Parent table uses 32-bit math */
+#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */
+#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
+#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
-#define ANOBJ_RESERVED 0x01
-#define ANOBJ_END_OF_PEER_LIST 0x02
-#define ANOBJ_DATA_WIDTH_32 0x04 /* Parent table is 64-bits */
-#define ANOBJ_METHOD_ARG 0x08
-#define ANOBJ_METHOD_LOCAL 0x10
-#define ANOBJ_METHOD_NO_RETVAL 0x20
-#define ANOBJ_METHOD_SOME_NO_RETVAL 0x40
-#define ANOBJ_IS_BIT_OFFSET 0x80
+#define ANOBJ_IS_EXTERNAL 0x08 /* i_aSL only: This object created via External() */
+#define ANOBJ_METHOD_NO_RETVAL 0x10 /* i_aSL only: Method has no return value */
+#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* i_aSL only: Method has at least one return value */
+#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
+#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
/*
* ACPI Table Descriptor. One per ACPI table
@@ -212,8 +226,8 @@ struct acpi_table_desc {
struct acpi_table_header *pointer;
u8 *aml_start;
u64 physical_address;
- u32 aml_length;
acpi_size length;
+ u32 aml_length;
acpi_owner_id owner_id;
u8 type;
u8 allocation;
@@ -276,6 +290,9 @@ struct acpi_create_field_info {
u8 field_type;
};
+typedef
+acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
+
/*
* Bitmapped ACPI types. Used internally only
*/
@@ -377,7 +394,7 @@ struct acpi_gpe_walk_info {
struct acpi_gpe_block_info *gpe_block;
};
-typedef acpi_status(*ACPI_GPE_CALLBACK) (struct acpi_gpe_xrupt_info *
+typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
gpe_xrupt_info,
struct acpi_gpe_block_info *
gpe_block);
@@ -416,13 +433,14 @@ struct acpi_field_info {
#define ACPI_CONTROL_PREDICATE_FALSE 0xC3
#define ACPI_CONTROL_PREDICATE_TRUE 0xC4
-#define ACPI_STATE_COMMON /* Two 32-bit fields and a pointer */\
- u8 data_type; /* To differentiate various internal objs */\
- u8 flags; \
- u16 value; \
- u16 state; \
- u16 reserved; \
- void *next;
+#define ACPI_STATE_COMMON \
+ void *next; \
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 flags; \
+ u16 value; \
+ u16 state;
+
+ /* There are 2 bytes available here until the next natural alignment boundary */
struct acpi_common_state {
ACPI_STATE_COMMON};
@@ -438,12 +456,12 @@ struct acpi_update_state {
* Pkg state - used to traverse nested package structures
*/
struct acpi_pkg_state {
- ACPI_STATE_COMMON union acpi_operand_object *source_object;
+ ACPI_STATE_COMMON u16 index;
+ union acpi_operand_object *source_object;
union acpi_operand_object *dest_object;
struct acpi_walk_state *walk_state;
void *this_target_obj;
u32 num_packages;
- u16 index;
};
/*
@@ -451,10 +469,10 @@ struct acpi_pkg_state {
* Allows nesting of these constructs
*/
struct acpi_control_state {
- ACPI_STATE_COMMON union acpi_parse_object *predicate_op;
+ ACPI_STATE_COMMON u16 opcode;
+ union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
- u16 opcode;
};
/*
@@ -465,11 +483,11 @@ struct acpi_scope_state {
};
struct acpi_pscope_state {
- ACPI_STATE_COMMON union acpi_parse_object *op; /* Current op being parsed */
+ ACPI_STATE_COMMON u32 arg_count; /* Number of fixed arguments */
+ union acpi_parse_object *op; /* Current op being parsed */
u8 *arg_end; /* Current argument end */
u8 *pkg_end; /* Current package end */
u32 arg_list; /* Next argument to parse */
- u32 arg_count; /* Number of fixed arguments */
};
/*
@@ -477,10 +495,10 @@ struct acpi_pscope_state {
* states are created when there are nested control methods executing.
*/
struct acpi_thread_state {
- ACPI_STATE_COMMON struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */
+ ACPI_STATE_COMMON u8 current_sync_level; /* Mutex Sync (nested acquire) level */
+ struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */
union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */
- u32 thread_id; /* Running thread ID */
- u8 current_sync_level; /* Mutex Sync (nested acquire) level */
+ acpi_thread_id thread_id; /* Running thread ID */
};
/*
@@ -488,10 +506,9 @@ struct acpi_thread_state {
* AML arguments
*/
struct acpi_result_values {
- ACPI_STATE_COMMON
- union acpi_operand_object *obj_desc[ACPI_OBJ_NUM_OPERANDS];
- u8 num_results;
+ ACPI_STATE_COMMON u8 num_results;
u8 last_insert;
+ union acpi_operand_object *obj_desc[ACPI_OBJ_NUM_OPERANDS];
};
typedef
@@ -546,7 +563,7 @@ struct acpi_opcode_info {
#endif
u32 parse_args; /* Grammar/Parse time arguments */
u32 runtime_args; /* Interpret time arguments */
- u32 flags; /* Misc flags */
+ u16 flags; /* Misc flags */
u8 object_type; /* Corresponding internal object type */
u8 class; /* Opcode class */
u8 type; /* Opcode type */
@@ -563,29 +580,31 @@ union acpi_parse_value {
};
#define ACPI_PARSE_COMMON \
- u8 data_type; /* To differentiate various internal objs */\
- u8 flags; /* Type of Op */\
- u16 aml_opcode; /* AML opcode */\
- u32 aml_offset; /* Offset of declaration in AML */\
- union acpi_parse_object *parent; /* Parent op */\
- union acpi_parse_object *next; /* Next op */\
+ union acpi_parse_object *parent; /* Parent op */\
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 flags; /* Type of Op */\
+ u16 aml_opcode; /* AML opcode */\
+ u32 aml_offset; /* Offset of declaration in AML */\
+ union acpi_parse_object *next; /* Next op */\
+ struct acpi_namespace_node *node; /* For use by interpreter */\
+ union acpi_parse_value value; /* Value or args associated with the opcode */\
ACPI_DISASM_ONLY_MEMBERS (\
- u8 disasm_flags; /* Used during AML disassembly */\
- u8 disasm_opcode; /* Subtype used for disassembly */\
- char aml_op_name[16]) /* Op name (debug only) */\
- /* NON-DEBUG members below: */\
- struct acpi_namespace_node *node; /* For use by interpreter */\
- union acpi_parse_value value; /* Value or args associated with the opcode */
-
-#define ACPI_DASM_BUFFER 0x00
-#define ACPI_DASM_RESOURCE 0x01
-#define ACPI_DASM_STRING 0x02
-#define ACPI_DASM_UNICODE 0x03
-#define ACPI_DASM_EISAID 0x04
-#define ACPI_DASM_MATCHOP 0x05
+ u8 disasm_flags; /* Used during AML disassembly */\
+ u8 disasm_opcode; /* Subtype used for disassembly */\
+ char aml_op_name[16]) /* Op name (debug only) */
+
+#define ACPI_DASM_BUFFER 0x00
+#define ACPI_DASM_RESOURCE 0x01
+#define ACPI_DASM_STRING 0x02
+#define ACPI_DASM_UNICODE 0x03
+#define ACPI_DASM_EISAID 0x04
+#define ACPI_DASM_MATCHOP 0x05
+#define ACPI_DASM_LNOT_PREFIX 0x06
+#define ACPI_DASM_LNOT_SUFFIX 0x07
+#define ACPI_DASM_IGNORE 0x08
/*
- * generic operation (for example: If, While, Store)
+ * Generic operation (for example: If, While, Store)
*/
struct acpi_parse_obj_common {
ACPI_PARSE_COMMON};
@@ -601,7 +620,7 @@ struct acpi_parse_obj_named {
u32 name; /* 4-byte name or zero if no name */
};
-/* The parse node is the fundamental element of the parse tree */
+/* This version is used by the i_aSL compiler only */
#define ACPI_MAX_PARSEOP_NAME 20
@@ -643,7 +662,6 @@ union acpi_parse_object {
* method.
*/
struct acpi_parse_state {
- u32 aml_size;
u8 *aml_start; /* First AML byte */
u8 *aml; /* Next AML byte */
u8 *aml_end; /* (last + 1) AML byte */
@@ -653,22 +671,23 @@ struct acpi_parse_state {
struct acpi_namespace_node *start_node;
union acpi_generic_state *scope; /* Current scope */
union acpi_parse_object *start_scope;
+ u32 aml_size;
};
/* Parse object flags */
-#define ACPI_PARSEOP_GENERIC 0x01
-#define ACPI_PARSEOP_NAMED 0x02
-#define ACPI_PARSEOP_DEFERRED 0x04
-#define ACPI_PARSEOP_BYTELIST 0x08
-#define ACPI_PARSEOP_IN_CACHE 0x80
+#define ACPI_PARSEOP_GENERIC 0x01
+#define ACPI_PARSEOP_NAMED 0x02
+#define ACPI_PARSEOP_DEFERRED 0x04
+#define ACPI_PARSEOP_BYTELIST 0x08
+#define ACPI_PARSEOP_IN_CACHE 0x80
/* Parse object disasm_flags */
-#define ACPI_PARSEOP_IGNORE 0x01
-#define ACPI_PARSEOP_PARAMLIST 0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
-#define ACPI_PARSEOP_SPECIAL 0x10
+#define ACPI_PARSEOP_IGNORE 0x01
+#define ACPI_PARSEOP_PARAMLIST 0x02
+#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
+#define ACPI_PARSEOP_SPECIAL 0x10
/*****************************************************************************
*
@@ -676,8 +695,8 @@ struct acpi_parse_state {
*
****************************************************************************/
-#define PCI_ROOT_HID_STRING "PNP0A03"
-#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"
+#define PCI_ROOT_HID_STRING "PNP0A03"
+#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"
struct acpi_bit_register_info {
u8 parent_register;
@@ -710,13 +729,14 @@ struct acpi_bit_register_info {
#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_WAKE_STATUS 0x8000
-#define ACPI_BITMASK_ALL_FIXED_STATUS (ACPI_BITMASK_TIMER_STATUS | \
- ACPI_BITMASK_BUS_MASTER_STATUS | \
- ACPI_BITMASK_GLOBAL_LOCK_STATUS | \
- ACPI_BITMASK_POWER_BUTTON_STATUS | \
- ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
- ACPI_BITMASK_RT_CLOCK_STATUS | \
- ACPI_BITMASK_WAKE_STATUS)
+#define ACPI_BITMASK_ALL_FIXED_STATUS (\
+ ACPI_BITMASK_TIMER_STATUS | \
+ ACPI_BITMASK_BUS_MASTER_STATUS | \
+ ACPI_BITMASK_GLOBAL_LOCK_STATUS | \
+ ACPI_BITMASK_POWER_BUTTON_STATUS | \
+ ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
+ ACPI_BITMASK_RT_CLOCK_STATUS | \
+ ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001
#define ACPI_BITMASK_GLOBAL_LOCK_ENABLE 0x0020
@@ -820,7 +840,7 @@ struct acpi_bit_register_info {
*
****************************************************************************/
-#define ACPI_ASCII_ZERO 0x30
+#define ACPI_ASCII_ZERO 0x30
/*****************************************************************************
*
@@ -842,9 +862,9 @@ struct acpi_integrity_info {
u32 objects;
};
-#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
-#define ACPI_DB_CONSOLE_OUTPUT 0x02
-#define ACPI_DB_DUPLICATE_OUTPUT 0x03
+#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
+#define ACPI_DB_CONSOLE_OUTPUT 0x02
+#define ACPI_DB_DUPLICATE_OUTPUT 0x03
/*****************************************************************************
*
@@ -854,18 +874,18 @@ struct acpi_integrity_info {
/* Entry for a memory allocation (debug only) */
-#define ACPI_MEM_MALLOC 0
-#define ACPI_MEM_CALLOC 1
-#define ACPI_MAX_MODULE_NAME 16
+#define ACPI_MEM_MALLOC 0
+#define ACPI_MEM_CALLOC 1
+#define ACPI_MAX_MODULE_NAME 16
#define ACPI_COMMON_DEBUG_MEM_HEADER \
- struct acpi_debug_mem_block *previous; \
- struct acpi_debug_mem_block *next; \
- u32 size; \
- u32 component; \
- u32 line; \
- char module[ACPI_MAX_MODULE_NAME]; \
- u8 alloc_type;
+ struct acpi_debug_mem_block *previous; \
+ struct acpi_debug_mem_block *next; \
+ u32 size; \
+ u32 component; \
+ u32 line; \
+ char module[ACPI_MAX_MODULE_NAME]; \
+ u8 alloc_type;
struct acpi_debug_mem_header {
ACPI_COMMON_DEBUG_MEM_HEADER};
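Editorial note: the renumbered mutex list in aclocal.h above encodes a lock-ordering protocol — a mutex with a lower handle value must be acquired before one with a higher value. The sketch below shows how such ordering can be checked at acquire time; the ACPI_MTX_* and ACPI_NUM_MUTEX values mirror the hunk, but the tracking array and helpers are illustrative assumptions, not ACPICA's actual mutex-acquire logic.

/*
 * Editorial sketch: enforce "acquire in ascending handle order".
 * In real code the held-mutex state would be tracked per thread and the
 * helpers would wrap the actual OS mutex calls.
 */
#include <stdio.h>

#define ACPI_MTX_INTERPRETER  0
#define ACPI_MTX_NAMESPACE    3
#define ACPI_MAX_MUTEX        8
#define ACPI_NUM_MUTEX        (ACPI_MAX_MUTEX + 1)

static int mutex_held[ACPI_NUM_MUTEX];

static int acquire_checked(unsigned int mutex_id)
{
	unsigned int i;

	/* Holding any mutex with a HIGHER id means the order is violated */
	for (i = mutex_id + 1; i < ACPI_NUM_MUTEX; i++) {
		if (mutex_held[i]) {
			fprintf(stderr, "lock order violation: %u after %u\n",
				mutex_id, i);
			return -1;
		}
	}
	mutex_held[mutex_id] = 1;    /* ...then take the real OS mutex */
	return 0;
}

static void release_checked(unsigned int mutex_id)
{
	mutex_held[mutex_id] = 0;    /* ...after releasing the OS mutex */
}

int main(void)
{
	acquire_checked(ACPI_MTX_INTERPRETER);   /* id 0: ok */
	acquire_checked(ACPI_MTX_NAMESPACE);     /* id 3: ok, ascending order */
	acquire_checked(ACPI_MTX_INTERPRETER);   /* id 0 while holding id 3: reported */
	release_checked(ACPI_MTX_NAMESPACE);
	release_checked(ACPI_MTX_INTERPRETER);
	return 0;
}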
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
index f2be2a881730..38f9aa4bef00 100644
--- a/include/acpi/acmacros.h
+++ b/include/acpi/acmacros.h
@@ -56,6 +56,10 @@
#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
#define ACPI_MIN(a,b) (((a)<(b))?(a):(b))
+/* Size calculation */
+
+#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
+
#if ACPI_MACHINE_WIDTH == 16
/*
@@ -99,7 +103,7 @@
* printf() format helpers
*/
-/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
+/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i),ACPI_LODWORD(i)
@@ -130,7 +134,6 @@
#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i)
#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL)
#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
-#define ACPI_FADT_OFFSET(f) ACPI_OFFSET (FADT_DESCRIPTOR, f)
#if ACPI_MACHINE_WIDTH == 16
#define ACPI_STORE_POINTER(d,s) ACPI_MOVE_32_TO_32(d,s)
@@ -141,6 +144,12 @@
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
#endif
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b)))
+#else
+#define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char,(a)), ACPI_CAST_PTR (char,(b)), ACPI_NAME_SIZE))
+#endif
+
/*
* Macros for moving data around to/from buffers that are possibly unaligned.
* If the hardware supports the transfer of unaligned data, just do the store.
@@ -341,29 +350,33 @@
/*
* Rounding macros (Power of two boundaries only)
*/
-#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & \
+#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & \
(~(((acpi_native_uint) boundary)-1)))
-#define ACPI_ROUND_UP(value,boundary) ((((acpi_native_uint)(value)) + \
+#define ACPI_ROUND_UP(value,boundary) ((((acpi_native_uint)(value)) + \
(((acpi_native_uint) boundary)-1)) & \
(~(((acpi_native_uint) boundary)-1)))
-#define ACPI_ROUND_DOWN_TO_32_BITS(a) ACPI_ROUND_DOWN(a,4)
-#define ACPI_ROUND_DOWN_TO_64_BITS(a) ACPI_ROUND_DOWN(a,8)
-#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a) ACPI_ROUND_DOWN(a,ALIGNED_ADDRESS_BOUNDARY)
+/* Note: sizeof(acpi_native_uint) evaluates to either 2, 4, or 8 */
+
+#define ACPI_ROUND_DOWN_TO_32BIT(a) ACPI_ROUND_DOWN(a,4)
+#define ACPI_ROUND_DOWN_TO_64BIT(a) ACPI_ROUND_DOWN(a,8)
+#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a) ACPI_ROUND_DOWN(a,sizeof(acpi_native_uint))
-#define ACPI_ROUND_UP_to_32_bITS(a) ACPI_ROUND_UP(a,4)
-#define ACPI_ROUND_UP_to_64_bITS(a) ACPI_ROUND_UP(a,8)
-#define ACPI_ROUND_UP_TO_NATIVE_WORD(a) ACPI_ROUND_UP(a,ALIGNED_ADDRESS_BOUNDARY)
+#define ACPI_ROUND_UP_TO_32BIT(a) ACPI_ROUND_UP(a,4)
+#define ACPI_ROUND_UP_TO_64BIT(a) ACPI_ROUND_UP(a,8)
+#define ACPI_ROUND_UP_TO_NATIVE_WORD(a) ACPI_ROUND_UP(a,sizeof(acpi_native_uint))
-#define ACPI_ROUND_BITS_UP_TO_BYTES(a) ACPI_DIV_8((a) + 7)
-#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a) ACPI_DIV_8((a))
+#define ACPI_ROUND_BITS_UP_TO_BYTES(a) ACPI_DIV_8((a) + 7)
+#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a) ACPI_DIV_8((a))
-#define ACPI_ROUND_UP_TO_1K(a) (((a) + 1023) >> 10)
+#define ACPI_ROUND_UP_TO_1K(a) (((a) + 1023) >> 10)
/* Generic (non-power-of-two) rounding */
-#define ACPI_ROUND_UP_TO(value,boundary) (((value) + ((boundary)-1)) / (boundary))
+#define ACPI_ROUND_UP_TO(value,boundary) (((value) + ((boundary)-1)) / (boundary))
+
+#define ACPI_IS_MISALIGNED(value) (((acpi_native_uint)value) & (sizeof(acpi_native_uint)-1))
/*
* Bitmask creation
@@ -371,10 +384,10 @@
* MASK_BITS_ABOVE creates a mask starting AT the position and above
* MASK_BITS_BELOW creates a mask starting one bit BELOW the position
*/
-#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_INTEGER_MAX) << ((u32) (position))))
-#define ACPI_MASK_BITS_BELOW(position) ((ACPI_INTEGER_MAX) << ((u32) (position)))
+#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_INTEGER_MAX) << ((u32) (position))))
+#define ACPI_MASK_BITS_BELOW(position) ((ACPI_INTEGER_MAX) << ((u32) (position)))
-#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
+#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
/* Bitfields within ACPI registers */
@@ -396,8 +409,8 @@
*
* The "Descriptor" field is the first field in both structures.
*/
-#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->descriptor_id)
-#define ACPI_SET_DESCRIPTOR_TYPE(d,t) (((union acpi_descriptor *)(void *)(d))->descriptor_id = t)
+#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
+#define ACPI_SET_DESCRIPTOR_TYPE(d,t) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
/* Macro to test the object type */
@@ -486,7 +499,6 @@
#define ACPI_ERROR(plist)
#define ACPI_ERROR_NAMESPACE(s,e)
#define ACPI_ERROR_METHOD(s,n,p,e)
-
#endif
/*
@@ -514,12 +526,12 @@
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
/*
* The Name parameter should be the procedure name as a quoted string.
- * This is declared as a local string ("my_function_name") so that it can
+ * This is declared as a local string ("MyFunctionName") so that it can
* be also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
* and macros such as __FUNCTION__.
*/
-#define ACPI_FUNCTION_NAME(name) const char *_acpi_function_name = name;
+#define ACPI_FUNCTION_NAME(name) const char *_acpi_function_name = #name;
#else
/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
@@ -528,13 +540,13 @@
#endif
#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
+ acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
#define ACPI_FUNCTION_TRACE_PTR(a,b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS,(void *)b)
+ acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS,(void *)b)
#define ACPI_FUNCTION_TRACE_U32(a,b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS,(u32)b)
+ acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS,(u32)b)
#define ACPI_FUNCTION_TRACE_STR(a,b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS,(char *)b)
+ acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS,(char *)b)
#define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr()
@@ -543,7 +555,7 @@
* WARNING: These macros include a return statement. This is usually considered
* bad form, but having a separate exit macro is very ugly and difficult to maintain.
* One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
- * so that "_acpi_function_name" is defined.
+ * so that "_AcpiFunctionName" is defined.
*
* Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
* about these constructs.
@@ -654,6 +666,7 @@
#define ACPI_DUMP_STACK_ENTRY(a)
#define ACPI_DUMP_OPERANDS(a,b,c,d,e)
#define ACPI_DUMP_ENTRY(a,b)
+#define ACPI_DUMP_TABLES(a,b)
#define ACPI_DUMP_PATHNAME(a,b,c,d)
#define ACPI_DUMP_RESOURCE_LIST(a)
#define ACPI_DUMP_BUFFER(a,b)
@@ -709,19 +722,19 @@
/* Memory allocation */
-#define ACPI_MEM_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_CALLOCATE(a) acpi_ut_callocate((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_FREE(a) acpi_os_free(a)
+#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_FREE(a) acpi_os_free(a)
#define ACPI_MEM_TRACKING(a)
#else
/* Memory allocation */
-#define ACPI_MEM_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_CALLOCATE(a) acpi_ut_callocate_and_track((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_FREE(a) acpi_ut_free_and_track(a,_COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_TRACKING(a) a
+#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_FREE(a) acpi_ut_free_and_track(a,_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_MEM_TRACKING(a) a
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
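Editorial note: the renamed rounding macros above all rely on the power-of-two idiom the comment describes ("Power of two boundaries only"). The standalone sketch below shows the same arithmetic with concrete values; these are plain C equivalents written for illustration, not the ACPICA macros themselves.

/*
 * Editorial sketch of the power-of-two rounding used by ACPI_ROUND_UP /
 * ACPI_ROUND_DOWN: "boundary" must be a power of two, so (boundary - 1)
 * is a mask of the low bits.
 */
#include <stdint.h>
#include <stdio.h>

static uintptr_t round_down(uintptr_t value, uintptr_t boundary)
{
	return value & ~(boundary - 1);
}

static uintptr_t round_up(uintptr_t value, uintptr_t boundary)
{
	return (value + boundary - 1) & ~(boundary - 1);
}

int main(void)
{
	/* round_up(0x1005, 8) == 0x1008, round_down(0x1005, 8) == 0x1000.
	 * The ACPI_IS_MISALIGNED test from the same family is simply
	 * (value & (sizeof(void *) - 1)) != 0. */
	printf("%#lx %#lx\n",
	       (unsigned long)round_up(0x1005, 8),
	       (unsigned long)round_down(0x1005, 8));
	return 0;
}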
diff --git a/include/acpi/acnamesp.h b/include/acpi/acnamesp.h
index b667a804fc8a..83b52f9f899a 100644
--- a/include/acpi/acnamesp.h
+++ b/include/acpi/acnamesp.h
@@ -63,6 +63,8 @@
#define ACPI_NS_DONT_OPEN_SCOPE 0x02
#define ACPI_NS_NO_PEER_SEARCH 0x04
#define ACPI_NS_ERROR_IF_FOUND 0x08
+#define ACPI_NS_PREFIX_IS_SCOPE 0x10
+#define ACPI_NS_EXTERNAL 0x20
#define ACPI_NS_WALK_UNLOCK TRUE
#define ACPI_NS_WALK_NO_UNLOCK FALSE
@@ -171,19 +173,17 @@ acpi_ns_dump_objects(acpi_object_type type,
/*
* nseval - Namespace evaluation functions
*/
-acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info);
-
-acpi_status
-acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info);
-
-acpi_status
-acpi_ns_evaluate_relative(char *pathname, struct acpi_parameter_info *info);
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
/*
* nsnames - Name and Scope manipulation
*/
u32 acpi_ns_opens_scope(acpi_object_type type);
+void
+acpi_ns_build_external_path(struct acpi_namespace_node *node,
+ acpi_size size, char *name_buffer);
+
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
@@ -196,9 +196,9 @@ u8
acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
acpi_status
-acpi_ns_get_node_by_path(char *external_pathname,
- struct acpi_namespace_node *in_prefix_node,
- u32 flags, struct acpi_namespace_node **out_node);
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+ char *external_pathname,
+ u32 flags, struct acpi_namespace_node **out_node);
acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node);
@@ -241,10 +241,10 @@ acpi_ns_search_and_enter(u32 entry_name,
u32 flags, struct acpi_namespace_node **ret_node);
acpi_status
-acpi_ns_search_node(u32 entry_name,
- struct acpi_namespace_node *node,
- acpi_object_type type,
- struct acpi_namespace_node **ret_node);
+acpi_ns_search_one_scope(u32 entry_name,
+ struct acpi_namespace_node *node,
+ acpi_object_type type,
+ struct acpi_namespace_node **ret_node);
void
acpi_ns_install_node(struct acpi_walk_state *walk_state,
diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h
index d130cfed8d55..1747d94084d8 100644
--- a/include/acpi/acobject.h
+++ b/include/acpi/acobject.h
@@ -1,7 +1,7 @@
/******************************************************************************
*
- * Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
+ * Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
*****************************************************************************/
@@ -45,10 +45,12 @@
#ifndef _ACOBJECT_H
#define _ACOBJECT_H
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
/*
- * The union acpi_operand_object is used to pass AML operands from the dispatcher
+ * The union acpi_operand_object is used to pass AML operands from the dispatcher
* to the interpreter, and to keep track of the various handlers such as
- * address space handlers and notify handlers. The object is a constant
+ * address space handlers and notify handlers. The object is a constant
* size in order to allow it to be cached and reused.
*/
@@ -61,17 +63,25 @@
/*
* Common area for all objects.
*
- * data_type is used to differentiate between internal descriptors, and MUST
- * be the first byte in this structure.
+ * descriptor_type is used to differentiate between internal descriptors, and
+ * must be in the same place across all descriptors
+ *
+ * Note: The descriptor_type and Type fields must appear in the identical
+ * position in both the struct acpi_namespace_node and union acpi_operand_object
+ * structures.
*/
-#define ACPI_OBJECT_COMMON_HEADER /* SIZE/ALIGNMENT: 32 bits, one ptr plus trailing 8-bit flag */\
- u8 descriptor; /* To differentiate various internal objs */\
- u8 type; /* acpi_object_type */\
- u16 reference_count; /* For object deletion management */\
- union acpi_operand_object *next_object; /* Objects linked to parent NS node */\
- u8 flags;
-
-/* Values for flag byte above */
+#define ACPI_OBJECT_COMMON_HEADER \
+ union acpi_operand_object *next_object; /* Objects linked to parent NS node */\
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 type; /* acpi_object_type */\
+ u16 reference_count; /* For object deletion management */\
+ u8 flags;
+ /*
+ * Note: There are 3 bytes available here before the
+ * next natural alignment boundary (for both 32/64 cases)
+ */
+
+/* Values for Flag byte above */
#define AOPOBJ_AML_CONSTANT 0x01
#define AOPOBJ_STATIC_POINTER 0x02
@@ -79,36 +89,7 @@
#define AOPOBJ_OBJECT_INITIALIZED 0x08
#define AOPOBJ_SETUP_COMPLETE 0x10
#define AOPOBJ_SINGLE_DATUM 0x20
-
-/*
- * Common bitfield for the field objects
- * "Field Datum" -- a datum from the actual field object
- * "Buffer Datum" -- a datum from a user buffer, read from or to be written to the field
- */
-#define ACPI_COMMON_FIELD_INFO /* SIZE/ALIGNMENT: 24 bits + three 32-bit values */\
- u8 field_flags; /* Access, update, and lock bits */\
- u8 attribute; /* From access_as keyword */\
- u8 access_byte_width; /* Read/Write size in bytes */\
- u32 bit_length; /* Length of field in bits */\
- u32 base_byte_offset; /* Byte offset within containing object */\
- u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
- u8 access_bit_width; /* Read/Write size in bits (8-64) */\
- u32 value; /* Value to store into the Bank or Index register */\
- struct acpi_namespace_node *node; /* Link back to parent node */
-
-/*
- * Fields common to both Strings and Buffers
- */
-#define ACPI_COMMON_BUFFER_INFO \
- u32 length;
-
-/*
- * Common fields for objects that support ASL notifications
- */
-#define ACPI_COMMON_NOTIFY_INFO \
- union acpi_operand_object *system_notify; /* Handler for system notifies */\
- union acpi_operand_object *device_notify; /* Handler for driver notifies */\
- union acpi_operand_object *handler; /* Handler for Address space */
+#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */
/******************************************************************************
*
@@ -125,25 +106,31 @@ struct acpi_object_integer {
/*
* Note: The String and Buffer object must be identical through the Pointer
- * element. There is code that depends on this.
+ * and length elements. There is code that depends on this.
+ *
+ * Fields common to both Strings and Buffers
*/
+#define ACPI_COMMON_BUFFER_INFO(_type) \
+ _type *pointer; \
+ u32 length;
+
struct acpi_object_string { /* Null terminated, ASCII characters only */
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO char *pointer; /* String in AML stream or allocated string */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
};
struct acpi_object_buffer {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO u8 * pointer; /* Buffer in AML stream or allocated buffer */
- struct acpi_namespace_node *node; /* Link back to parent node */
- u8 *aml_start;
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(u8) /* Buffer in AML stream or allocated buffer */
u32 aml_length;
+ u8 *aml_start;
+ struct acpi_namespace_node *node; /* Link back to parent node */
};
struct acpi_object_package {
- ACPI_OBJECT_COMMON_HEADER u32 count; /* # of elements in package */
- u32 aml_length;
- u8 *aml_start;
- struct acpi_namespace_node *node; /* Link back to parent node */
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Link back to parent node */
union acpi_operand_object **elements; /* Array of pointers to acpi_objects */
+ u8 *aml_start;
+ u32 aml_length;
+ u32 count; /* # of elements in package */
};
/******************************************************************************
@@ -156,23 +143,6 @@ struct acpi_object_event {
ACPI_OBJECT_COMMON_HEADER void *semaphore;
};
-#define ACPI_INFINITE_CONCURRENCY 0xFF
-
-typedef
-acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
-
-struct acpi_object_method {
- ACPI_OBJECT_COMMON_HEADER u8 method_flags;
- u8 param_count;
- u32 aml_length;
- void *semaphore;
- u8 *aml_start;
- ACPI_INTERNAL_METHOD implementation;
- u8 concurrency;
- u8 thread_count;
- acpi_owner_id owner_id;
-};
-
struct acpi_object_mutex {
ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */
u16 acquisition_depth; /* Allow multiple Acquires, same thread */
@@ -186,11 +156,23 @@ struct acpi_object_mutex {
struct acpi_object_region {
ACPI_OBJECT_COMMON_HEADER u8 space_id;
- union acpi_operand_object *handler; /* Handler for region access */
struct acpi_namespace_node *node; /* Containing namespace node */
+ union acpi_operand_object *handler; /* Handler for region access */
union acpi_operand_object *next;
- u32 length;
acpi_physical_address address;
+ u32 length;
+};
+
+struct acpi_object_method {
+ ACPI_OBJECT_COMMON_HEADER u8 method_flags;
+ u8 param_count;
+ u8 concurrency;
+ void *semaphore;
+ u8 *aml_start;
+ ACPI_INTERNAL_METHOD implementation;
+ u32 aml_length;
+ u8 thread_count;
+ acpi_owner_id owner_id;
};
/******************************************************************************
@@ -199,6 +181,14 @@ struct acpi_object_region {
*
*****************************************************************************/
+/*
+ * Common fields for objects that support ASL notifications
+ */
+#define ACPI_COMMON_NOTIFY_INFO \
+ union acpi_operand_object *system_notify; /* Handler for system notifies */\
+ union acpi_operand_object *device_notify; /* Handler for driver notifies */\
+ union acpi_operand_object *handler; /* Handler for Address space */
+
struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
@@ -213,9 +203,9 @@ struct acpi_object_power_resource {
};
struct acpi_object_processor {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO u32 proc_id;
- u32 length;
- acpi_io_address address;
+ ACPI_OBJECT_COMMON_HEADER u8 proc_id;
+ u8 length;
+ ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
};
struct acpi_object_thermal_zone {
@@ -227,9 +217,24 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
*
*****************************************************************************/
+/*
+ * Common bitfield for the field objects
+ * "Field Datum" -- a datum from the actual field object
+ * "Buffer Datum" -- a datum from a user buffer, read from or to be written to the field
+ */
+#define ACPI_COMMON_FIELD_INFO \
+ u8 field_flags; /* Access, update, and lock bits */\
+ u8 attribute; /* From access_as keyword */\
+ u8 access_byte_width; /* Read/Write size in bytes */\
+ struct acpi_namespace_node *node; /* Link back to parent node */\
+ u32 bit_length; /* Length of field in bits */\
+ u32 base_byte_offset; /* Byte offset within containing object */\
+ u32 value; /* Value to store into the Bank or Index register */\
+ u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+ u8 access_bit_width; /* Read/Write size in bits (8-64) */
+
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing Operation Region object */
- /* (REGION/BANK fields only) */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
struct acpi_object_region_field {
@@ -244,7 +249,7 @@ struct acpi_object_bank_field {
struct acpi_object_index_field {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO
/*
- * No "region_obj" pointer needed since the Index and Data registers
+ * No "RegionObj" pointer needed since the Index and Data registers
* are each field definitions unto themselves.
*/
union acpi_operand_object *index_obj; /* Index register */
@@ -269,13 +274,9 @@ struct acpi_object_notify_handler {
void *context;
};
-/* Flags for address handler */
-
-#define ACPI_ADDR_HANDLER_DEFAULT_INSTALLED 0x1
-
struct acpi_object_addr_handler {
ACPI_OBJECT_COMMON_HEADER u8 space_id;
- u16 hflags;
+ u8 handler_flags;
acpi_adr_space_handler handler;
struct acpi_namespace_node *node; /* Parent device */
void *context;
@@ -284,6 +285,10 @@ struct acpi_object_addr_handler {
union acpi_operand_object *next;
};
+/* Flags for address handler (handler_flags) */
+
+#define ACPI_ADDR_HANDLER_DEFAULT_INSTALLED 0x01
+
/******************************************************************************
*
* Special internal objects
@@ -297,10 +302,10 @@ struct acpi_object_addr_handler {
struct acpi_object_reference {
ACPI_OBJECT_COMMON_HEADER u8 target_type; /* Used for index_op */
u16 opcode;
- u32 offset; /* Used for arg_op, local_op, and index_op */
- void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
+ void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
struct acpi_namespace_node *node;
union acpi_operand_object **where;
+ u32 offset; /* Used for arg_op, local_op, and index_op */
};
/*
@@ -311,12 +316,10 @@ struct acpi_object_reference {
* Currently: Region and field_unit types
*/
struct acpi_object_extra {
- ACPI_OBJECT_COMMON_HEADER u8 byte_fill1;
- u16 word_fill1;
- u32 aml_length;
- u8 *aml_start;
- struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
void *region_context; /* Region-specific data */
+ u8 *aml_start;
+ u32 aml_length;
};
/* Additional data that can be attached to namespace nodes */
@@ -391,8 +394,13 @@ union acpi_operand_object {
#define ACPI_DESC_TYPE_NAMED 0x0F
#define ACPI_DESC_TYPE_MAX 0x0F
+struct acpi_common_descriptor {
+ void *common_pointer;
+ u8 descriptor_type; /* To differentiate various internal objs */
+};
+
union acpi_descriptor {
- u8 descriptor_id; /* To differentiate various internal objs */
+ struct acpi_common_descriptor common;
union acpi_operand_object object;
struct acpi_namespace_node node;
union acpi_parse_object op;
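Editorial note: the comments in this acobject.h hunk stress that descriptor_type must sit at the same position in struct acpi_namespace_node and union acpi_operand_object, and the new struct acpi_common_descriptor / union acpi_descriptor exist so a generic pointer can be classified before it is cast. The sketch below shows that shared-tag pattern with illustrative names only; it is not the ACPICA layout.

/*
 * Editorial sketch: every descriptor variant starts with the same two
 * fields, so the tag can be read through the "common" view regardless of
 * which variant the pointer really points to.
 */
#include <stdio.h>

#define DESC_TYPE_NAMED    0x0F
#define DESC_TYPE_OPERAND  0x0B

struct common_descriptor {
	void *common_pointer;
	unsigned char descriptor_type;   /* tag, identical offset everywhere */
};

struct namespace_node {
	void *object;
	unsigned char descriptor_type;   /* must line up with the common view */
	unsigned char type;
	/* ... */
};

union descriptor {
	struct common_descriptor common;
	struct namespace_node node;
	/* ... operand object, parse op, etc. */
};

#define GET_DESCRIPTOR_TYPE(d) \
	(((union descriptor *)(void *)(d))->common.descriptor_type)

int main(void)
{
	struct namespace_node n = { 0 };

	n.descriptor_type = DESC_TYPE_NAMED;
	printf("tag = 0x%02X\n", GET_DESCRIPTOR_TYPE(&n));   /* prints 0x0F */
	return 0;
}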
diff --git a/include/acpi/acopcode.h b/include/acpi/acopcode.h
index e6d78bd9e90a..7659a46bc432 100644
--- a/include/acpi/acopcode.h
+++ b/include/acpi/acopcode.h
@@ -94,7 +94,7 @@
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
#define ARGP_CONTINUE_OP ARG_NONE
-#define ARGP_COPY_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SIMPLENAME)
+#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
#define ARGP_CREATE_BYTE_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
#define ARGP_CREATE_DWORD_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 7785d481dc3e..8d5039d0b430 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -50,7 +50,7 @@
* component basis and a per-exception-type basis.
*/
-/* Component IDs are used in the global "debug_layer" */
+/* Component IDs are used in the global "DebugLayer" */
#define ACPI_UTILITIES 0x00000001
#define ACPI_HARDWARE 0x00000002
@@ -121,7 +121,7 @@
#define ACPI_LV_INTERRUPTS 0x08000000
#define ACPI_LV_VERBOSITY3 0x0F000000 | ACPI_LV_VERBOSITY2
-/* Exceptionally verbose output -- also used in the global "debug_level" */
+/* Exceptionally verbose output -- also used in the global "DebugLevel" */
#define ACPI_LV_AML_DISASSEMBLE 0x10000000
#define ACPI_LV_VERBOSE_INFO 0x20000000
@@ -135,7 +135,7 @@
*/
#define ACPI_DEBUG_LEVEL(dl) (u32) dl,ACPI_DEBUG_PARAMETERS
-/* Exception level -- used in the global "debug_level" */
+/* Exception level -- used in the global "DebugLevel" */
#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT)
#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
@@ -144,13 +144,13 @@
/*
* These two levels are essentially obsolete, all instances in the
- * ACPICA core code have been replaced by REPORT_ERROR and REPORT_WARNING
+ * ACPICA core code have been replaced by ACPI_ERROR and ACPI_WARNING
* (Kept here because some drivers may still use them)
*/
#define ACPI_DB_ERROR ACPI_DEBUG_LEVEL (ACPI_LV_ERROR)
#define ACPI_DB_WARN ACPI_DEBUG_LEVEL (ACPI_LV_WARN)
-/* Trace level -- also used in the global "debug_level" */
+/* Trace level -- also used in the global "DebugLevel" */
#define ACPI_DB_INIT_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_INIT_NAMES)
#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS)
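Editorial note: the reworded comments above distinguish the component IDs kept in the global DebugLayer from the verbosity bits kept in DebugLevel; a debug statement is emitted only when both its component bit and its level bit are enabled. The sketch below shows that gating in isolation; the globals and the ACPI_LV_INFO value are assumed stand-ins, not the actual ACPICA debug machinery.

/*
 * Editorial sketch: a message prints only if its level bit is set in
 * dbg_level AND its component bit is set in dbg_layer.
 */
#include <stdarg.h>
#include <stdio.h>

#define ACPI_UTILITIES   0x00000001   /* component ID (DebugLayer bit), from the hunk */
#define ACPI_LV_INFO     0x00000004   /* verbosity bit (DebugLevel) -- assumed value */

static unsigned int dbg_level = ACPI_LV_INFO;
static unsigned int dbg_layer = ACPI_UTILITIES;

static void debug_print(unsigned int level, unsigned int component,
			const char *fmt, ...)
{
	va_list ap;

	if (!(dbg_level & level) || !(dbg_layer & component))
		return;                       /* filtered out */

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	debug_print(ACPI_LV_INFO, ACPI_UTILITIES, "printed: %d\n", 42);
	debug_print(ACPI_LV_INFO, 0x00000002, "suppressed\n");   /* layer bit not enabled */
	return 0;
}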
diff --git a/include/acpi/acparser.h b/include/acpi/acparser.h
index 5a1ff484af33..9d49d3c41cd9 100644
--- a/include/acpi/acparser.h
+++ b/include/acpi/acparser.h
@@ -46,7 +46,7 @@
#define OP_HAS_RETURN_VALUE 1
-/* variable # arguments */
+/* Variable number of arguments. This field must be 32 bits */
#define ACPI_VAR_ARGS ACPI_UINT32_MAX
@@ -71,7 +71,7 @@
/*
* psxface - Parser external interfaces
*/
-acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info);
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info);
/*
* psargs - Parse AML opcode arguments
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 6dca3d542080..a2b3e390a503 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -26,7 +26,7 @@
#ifndef __ACPI_BUS_H__
#define __ACPI_BUS_H__
-#include <linux/kobject.h>
+#include <linux/device.h>
#include <acpi/acpi.h>
@@ -59,7 +59,7 @@ acpi_evaluate_reference(acpi_handle handle,
#define ACPI_BUS_FILE_ROOT "acpi"
extern struct proc_dir_entry *acpi_root_dir;
-extern FADT_DESCRIPTOR acpi_fadt;
+extern struct fadt_descriptor acpi_fadt;
enum acpi_bus_removal_type {
ACPI_BUS_REMOVAL_NORMAL = 0,
@@ -169,7 +169,8 @@ struct acpi_device_flags {
u32 power_manageable:1;
u32 performance_manageable:1;
u32 wake_capable:1; /* Wakeup(_PRW) supported? */
- u32 reserved:20;
+ u32 force_power_state:1;
+ u32 reserved:19;
};
/* File System */
@@ -296,6 +297,7 @@ struct acpi_device {
struct acpi_driver *driver;
void *driver_data;
struct kobject kobj;
+ struct device dev;
};
#define acpi_driver_data(d) ((d)->driver_data)
@@ -327,7 +329,7 @@ int acpi_bus_set_power(acpi_handle handle, int state);
int acpi_bus_generate_event(struct acpi_device *device, u8 type, int data);
int acpi_bus_receive_event(struct acpi_bus_event *event);
int acpi_bus_register_driver(struct acpi_driver *driver);
-int acpi_bus_unregister_driver(struct acpi_driver *driver);
+void acpi_bus_unregister_driver(struct acpi_driver *driver);
int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent,
acpi_handle handle, int type);
int acpi_bus_trim(struct acpi_device *start, int rmdevice);
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
new file mode 100644
index 000000000000..1049f2a0a6db
--- /dev/null
+++ b/include/acpi/acpi_numa.h
@@ -0,0 +1,23 @@
+#ifndef __ACPI_NUMA_H
+#define __ACPI_NUMA_H
+
+#ifdef CONFIG_ACPI_NUMA
+#include <linux/kernel.h>
+
+/* Proximity bitmap length */
+#if MAX_NUMNODES > 256
+#define MAX_PXM_DOMAINS MAX_NUMNODES
+#else
+#define MAX_PXM_DOMAINS (256) /* Old pxm spec defines only 8 bits */
+#endif
+
+extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
+extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
+
+extern int __cpuinit pxm_to_node(int);
+extern int __cpuinit node_to_pxm(int);
+extern int __cpuinit acpi_map_pxm_to_node(int);
+extern void __cpuinit acpi_unmap_pxm_to_node(int);
+
+#endif /* CONFIG_ACPI_NUMA */
+#endif /* __ACPI_NUMA_H */
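Editorial note: the new acpi_numa.h exposes the proximity-domain ("pxm") to NUMA-node translation arrays and the acpi_map_pxm_to_node() helper. The sketch below illustrates the general idea — hand out the next free logical node the first time a pxm value is seen — under assumed array sizes and an assumed -1 "unset" convention; it is not the kernel's actual implementation.

/*
 * Editorial sketch: first-come, first-served pxm -> node assignment.
 */
#include <stdio.h>

#define MAX_PXM_DOMAINS  256
#define MAX_NUMNODES      64

static int pxm_to_node_map[MAX_PXM_DOMAINS];
static int node_to_pxm_map[MAX_NUMNODES];
static int next_node;

static void init_maps(void)
{
	int i;

	for (i = 0; i < MAX_PXM_DOMAINS; i++)
		pxm_to_node_map[i] = -1;          /* -1 == not yet mapped */
	for (i = 0; i < MAX_NUMNODES; i++)
		node_to_pxm_map[i] = -1;
}

static int map_pxm_to_node(int pxm)
{
	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
		return -1;

	if (pxm_to_node_map[pxm] == -1) {         /* first sighting of this pxm */
		if (next_node >= MAX_NUMNODES)
			return -1;                /* out of logical nodes */
		pxm_to_node_map[pxm] = next_node;
		node_to_pxm_map[next_node] = pxm;
		next_node++;
	}
	return pxm_to_node_map[pxm];
}

int main(void)
{
	init_maps();
	printf("pxm 5 -> node %d\n", map_pxm_to_node(5));   /* node 0 */
	printf("pxm 9 -> node %d\n", map_pxm_to_node(9));   /* node 1 */
	printf("pxm 5 -> node %d\n", map_pxm_to_node(5));   /* node 0 again */
	return 0;
}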
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 970e9a6372c3..8f473c83b7c4 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -50,12 +50,16 @@
#include "platform/acenv.h"
#include "actypes.h"
-/* Priorities for acpi_os_queue_for_execution */
+/* Types for acpi_os_execute */
-#define OSD_PRIORITY_GPE 1
-#define OSD_PRIORITY_HIGH 2
-#define OSD_PRIORITY_MED 3
-#define OSD_PRIORITY_LO 4
+typedef enum {
+ OSL_GLOBAL_LOCK_HANDLER,
+ OSL_NOTIFY_HANDLER,
+ OSL_GPE_HANDLER,
+ OSL_DEBUGGER_THREAD,
+ OSL_EC_POLL_HANDLER,
+ OSL_EC_BURST_HANDLER
+} acpi_execute_type;
#define ACPI_NO_UNIT_LIMIT ((u32) -1)
#define ACPI_MUTEX_SEM 1
@@ -161,13 +165,11 @@ acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler service_routine);
/*
* Threads and Scheduling
*/
-u32 acpi_os_get_thread_id(void);
+acpi_thread_id acpi_os_get_thread_id(void);
acpi_status
-acpi_os_queue_for_execution(u32 priority,
- acpi_osd_exec_callback function, void *context);
-
-void acpi_os_wait_events_complete(void *context);
+acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context);
void acpi_os_wait_events_complete(void *context);
@@ -214,6 +216,12 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
/*
* Miscellaneous
*/
+acpi_status acpi_os_validate_interface(char *interface);
+
+acpi_status
+acpi_os_validate_address(u8 space_id,
+ acpi_physical_address address, acpi_size length);
+
u8 acpi_os_readable(void *pointer, acpi_size length);
#ifdef ACPI_FUTURE_USAGE
@@ -255,11 +263,4 @@ char *acpi_os_get_next_filename(void *dir_handle);
void acpi_os_close_directory(void *dir_handle);
-/*
- * Debug
- */
-void
-acpi_os_dbg_assert(void *failed_assertion,
- void *file_name, u32 line_number, char *message);
-
#endif /* __ACPIOSXF_H__ */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 66cf2ecef57a..049e9aa1b867 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -268,7 +268,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
* Resource interfaces
*/
typedef
-acpi_status(*ACPI_WALK_RESOURCE_CALLBACK) (struct acpi_resource * resource,
+acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
void *context);
acpi_status
@@ -290,7 +290,7 @@ acpi_get_possible_resources(acpi_handle device_handle,
acpi_status
acpi_walk_resources(acpi_handle device_handle,
char *name,
- ACPI_WALK_RESOURCE_CALLBACK user_function, void *context);
+ acpi_walk_resource_callback user_function, void *context);
acpi_status
acpi_set_current_resources(acpi_handle device_handle,
diff --git a/include/acpi/acresrc.h b/include/acpi/acresrc.h
index fa02e8083381..ad11fc13fbef 100644
--- a/include/acpi/acresrc.h
+++ b/include/acpi/acresrc.h
@@ -164,23 +164,26 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/*
* rsutils
*/
+
acpi_status
-acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
+acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
acpi_status
-acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
+acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
-#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
-#endif /* ACPI_FUTURE_USAGE */
+acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
acpi_status
acpi_rs_get_method_data(acpi_handle handle,
char *path, struct acpi_buffer *ret_buffer);
acpi_status
-acpi_rs_set_srs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
+acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
/*
* rscalc
@@ -198,8 +201,9 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
acpi_size * buffer_size_needed);
acpi_status
-acpi_rs_convert_aml_to_resources(u8 * aml_buffer,
- u32 aml_buffer_length, u8 * output_buffer);
+acpi_rs_convert_aml_to_resources(u8 * aml,
+ u32 length,
+ u32 offset, u8 resource_index, void **context);
acpi_status
acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
diff --git a/include/acpi/acstruct.h b/include/acpi/acstruct.h
index d8c1c2cdac0c..5e8095f0f78f 100644
--- a/include/acpi/acstruct.h
+++ b/include/acpi/acstruct.h
@@ -44,6 +44,8 @@
#ifndef __ACSTRUCT_H__
#define __ACSTRUCT_H__
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
/*****************************************************************************
*
* Tree walking typedefs and structs
@@ -51,67 +53,76 @@
****************************************************************************/
/*
- * Walk state - current state of a parse tree walk. Used for both a leisurely stroll through
- * the tree (for whatever reason), and for control method execution.
+ * Walk state - current state of a parse tree walk. Used for both a leisurely
+ * stroll through the tree (for whatever reason), and for control method
+ * execution.
*/
#define ACPI_NEXT_OP_DOWNWARD 1
#define ACPI_NEXT_OP_UPWARD 2
+/*
+ * Groups of definitions for walk_type used for different implementations of
+ * walkers (never simultaneously) - flags for interpreter:
+ */
#define ACPI_WALK_NON_METHOD 0
-#define ACPI_WALK_METHOD 1
-#define ACPI_WALK_METHOD_RESTART 2
-#define ACPI_WALK_CONST_REQUIRED 3
-#define ACPI_WALK_CONST_OPTIONAL 4
+#define ACPI_WALK_METHOD 0x01
+#define ACPI_WALK_METHOD_RESTART 0x02
+
+/* Flags for i_aSL compiler only */
+
+#define ACPI_WALK_CONST_REQUIRED 0x10
+#define ACPI_WALK_CONST_OPTIONAL 0x20
struct acpi_walk_state {
- u8 data_type; /* To differentiate various internal objs MUST BE FIRST! */
+ struct acpi_walk_state *next; /* Next walk_state in list */
+ u8 descriptor_type; /* To differentiate various internal objs */
u8 walk_type;
- acpi_owner_id owner_id; /* Owner of objects created during the walk */
- u8 last_predicate; /* Result of last predicate */
- u8 current_result; /* */
+ u16 opcode; /* Current AML opcode */
u8 next_op_info; /* Info about next_op */
u8 num_operands; /* Stack pointer for Operands[] array */
+ acpi_owner_id owner_id; /* Owner of objects created during the walk */
+ u8 last_predicate; /* Result of last predicate */
+ u8 current_result;
u8 return_used;
- u16 opcode; /* Current AML opcode */
u8 scope_depth;
u8 pass_number; /* Parse pass during table load */
- u32 arg_count; /* push for fixed or var args */
u32 aml_offset;
u32 arg_types;
u32 method_breakpoint; /* For single stepping */
u32 user_breakpoint; /* User AML breakpoint */
u32 parse_flags;
+
+ struct acpi_parse_state parser_state; /* Current state of parser */
u32 prev_arg_types;
+ u32 arg_count; /* push for fixed or var args */
- u8 *aml_last_while;
struct acpi_namespace_node arguments[ACPI_METHOD_NUM_ARGS]; /* Control method arguments */
+ struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */
+ union acpi_operand_object *operands[ACPI_OBJ_NUM_OPERANDS + 1]; /* Operands passed to the interpreter (+1 for NULL terminator) */
+ union acpi_operand_object **params;
+
+ u8 *aml_last_while;
union acpi_operand_object **caller_return_desc;
union acpi_generic_state *control_state; /* List of control states (nested IFs) */
struct acpi_namespace_node *deferred_node; /* Used when executing deferred opcodes */
struct acpi_gpe_event_info *gpe_event_info; /* Info for GPE (_Lxx/_Exx methods only */
union acpi_operand_object *implicit_return_obj;
- struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */
struct acpi_namespace_node *method_call_node; /* Called method Node */
union acpi_parse_object *method_call_op; /* method_call Op if running a method */
union acpi_operand_object *method_desc; /* Method descriptor if running a method */
struct acpi_namespace_node *method_node; /* Method node if running a method. */
union acpi_parse_object *op; /* Current parser op */
- union acpi_operand_object *operands[ACPI_OBJ_NUM_OPERANDS + 1]; /* Operands passed to the interpreter (+1 for NULL terminator) */
const struct acpi_opcode_info *op_info; /* Info on current opcode */
union acpi_parse_object *origin; /* Start of walk [Obsolete] */
- union acpi_operand_object **params;
- struct acpi_parse_state parser_state; /* Current state of parser */
union acpi_operand_object *result_obj;
union acpi_generic_state *results; /* Stack of accumulated results */
union acpi_operand_object *return_desc; /* Return object, if any */
union acpi_generic_state *scope_info; /* Stack of nested scopes */
-
union acpi_parse_object *prev_op; /* Last op that was processed */
union acpi_parse_object *next_op; /* next op to be processed */
+ struct acpi_thread_state *thread;
acpi_parse_downwards descending_callback;
acpi_parse_upwards ascending_callback;
- struct acpi_thread_state *thread;
- struct acpi_walk_state *next; /* Next walk_state in list */
};
/* Info used by acpi_ps_init_objects */
@@ -131,32 +142,6 @@ struct acpi_init_walk_info {
struct acpi_table_desc *table_desc;
};
-/* Info used by acpi_ns_initialize_devices */
-
-struct acpi_device_walk_info {
- u16 device_count;
- u16 num_STA;
- u16 num_INI;
- struct acpi_table_desc *table_desc;
-};
-
-/* TBD: [Restructure] Merge with struct above */
-
-struct acpi_walk_info {
- u32 debug_level;
- u32 count;
- acpi_owner_id owner_id;
- u8 display_type;
-};
-
-/* Display Types */
-
-#define ACPI_DISPLAY_SUMMARY (u8) 0
-#define ACPI_DISPLAY_OBJECTS (u8) 1
-#define ACPI_DISPLAY_MASK (u8) 1
-
-#define ACPI_DISPLAY_SHORT (u8) 2
-
struct acpi_get_devices_info {
acpi_walk_callback user_function;
void *context;
@@ -189,16 +174,21 @@ union acpi_aml_operands {
} mid;
};
-/* Internal method parameter list */
-
-struct acpi_parameter_info {
- struct acpi_namespace_node *node;
+/*
+ * Structure used to pass object evaluation parameters.
+ * Purpose is to reduce CPU stack use.
+ */
+struct acpi_evaluate_info {
+ struct acpi_namespace_node *prefix_node;
+ char *pathname;
union acpi_operand_object *obj_desc;
union acpi_operand_object **parameters;
+ struct acpi_namespace_node *resolved_node;
union acpi_operand_object *return_object;
u8 pass_number;
u8 parameter_type;
u8 return_object_type;
+ u8 flags;
};
/* Types for parameter_type above */
@@ -206,4 +196,35 @@ struct acpi_parameter_info {
#define ACPI_PARAM_ARGS 0
#define ACPI_PARAM_GPE 1
+/* Values for Flags above */
+
+#define ACPI_IGNORE_RETURN_VALUE 1
+
+/* Info used by acpi_ns_initialize_devices */
+
+struct acpi_device_walk_info {
+ u16 device_count;
+ u16 num_STA;
+ u16 num_INI;
+ struct acpi_table_desc *table_desc;
+ struct acpi_evaluate_info *evaluate_info;
+};
+
+/* TBD: [Restructure] Merge with struct above */
+
+struct acpi_walk_info {
+ u32 debug_level;
+ u32 count;
+ acpi_owner_id owner_id;
+ u8 display_type;
+};
+
+/* Display Types */
+
+#define ACPI_DISPLAY_SUMMARY (u8) 0
+#define ACPI_DISPLAY_OBJECTS (u8) 1
+#define ACPI_DISPLAY_MASK (u8) 1
+
+#define ACPI_DISPLAY_SHORT (u8) 2
+
#endif
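
The new struct acpi_evaluate_info introduced in acstruct.h is documented as a way to pass object-evaluation parameters while reducing CPU stack use: callers fill one small structure and hand a single pointer down the call chain instead of pushing many arguments. The sketch below shows only that calling convention, under assumed names (struct eval_info, evaluate_object, EVAL_IGNORE_RETURN_VALUE); it is not the ACPICA implementation.

#include <stdio.h>
#include <stdlib.h>

#define EVAL_IGNORE_RETURN_VALUE 0x01	/* Analogous in spirit to ACPI_IGNORE_RETURN_VALUE */

struct eval_info {			/* Stand-in for struct acpi_evaluate_info */
	const char *pathname;		/* Object to evaluate */
	void **parameters;		/* Optional argument list, NULL-terminated */
	void *return_object;		/* Filled in by the callee */
	unsigned char flags;
};

static int evaluate_object(struct eval_info *info)
{
	/* A real evaluator would resolve info->pathname and execute it;
	 * here we only demonstrate the single-pointer calling convention. */
	printf("evaluating %s (flags=0x%02x)\n", info->pathname,
	       (unsigned)info->flags);
	if (!(info->flags & EVAL_IGNORE_RETURN_VALUE))
		info->return_object = "result";
	return 0;
}

int main(void)
{
	struct eval_info *info = calloc(1, sizeof(*info));

	if (!info)
		return 1;
	info->pathname = "\\_SB.PCI0._STA";
	info->flags = 0;		/* Caller wants the return object */

	evaluate_object(info);
	printf("return object: %s\n",
	       info->return_object ? (char *)info->return_object : "(none)");
	free(info);
	return 0;
}
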
diff --git a/include/acpi/actables.h b/include/acpi/actables.h
index 30a47542e1c8..4dbaf02fe526 100644
--- a/include/acpi/actables.h
+++ b/include/acpi/actables.h
@@ -136,7 +136,11 @@ acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc);
acpi_status
acpi_tb_verify_table_checksum(struct acpi_table_header *table_header);
-u8 acpi_tb_generate_checksum(void *buffer, u32 length);
+u8 acpi_tb_sum_table(void *buffer, u32 length);
+
+u8 acpi_tb_generate_checksum(struct acpi_table_header *table);
+
+void acpi_tb_set_checksum(struct acpi_table_header *table);
acpi_status
acpi_tb_validate_table_header(struct acpi_table_header *table_header);
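
The actables.h hunk splits checksum handling into a raw byte sum (acpi_tb_sum_table), checksum generation and checksum update. The underlying ACPI rule is that the byte sum of an entire table, including its checksum field, must be zero modulo 256. A small self-contained sketch of that rule follows, using illustrative helpers sum_bytes() and set_checksum() rather than the real ACPICA functions.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Byte sum of a buffer, modulo 256 (the role acpi_tb_sum_table plays above) */
static uint8_t sum_bytes(const void *buffer, uint32_t length)
{
	const uint8_t *p = buffer;
	uint8_t sum = 0;

	while (length--)
		sum = (uint8_t)(sum + *p++);
	return sum;
}

/* Write the checksum byte so the whole table sums to zero */
static void set_checksum(uint8_t *table, uint32_t length, size_t checksum_offset)
{
	table[checksum_offset] = 0;
	table[checksum_offset] = (uint8_t)(0 - sum_bytes(table, length));
}

int main(void)
{
	/* Fake 8-byte "table" whose checksum byte sits at offset 4 */
	uint8_t table[8] = { 'F', 'A', 'K', 'E', 0, 1, 2, 3 };

	set_checksum(table, sizeof(table), 4);
	assert(sum_bytes(table, sizeof(table)) == 0);	/* Table now verifies */
	return 0;
}
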
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index ed53f842dad4..b125ceed9cb7 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Name: actbl.h - Table data structures defined in ACPI specification
+ * Name: actbl.h - Basic ACPI Table Definitions
*
*****************************************************************************/
@@ -45,66 +45,45 @@
#define __ACTBL_H__
/*
- * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
- * This is the only type that is even remotely portable. Anything else is not
- * portable, so do not use any other bitfield types.
- */
-
-/*
- * Values for description table header signatures
+ * Values for description table header signatures. Useful because they make
+ * it more difficult to inadvertently type in the wrong signature.
*/
-#define RSDP_NAME "RSDP"
-#define RSDP_SIG "RSD PTR " /* RSDT Pointer signature */
-#define APIC_SIG "APIC" /* Multiple APIC Description Table */
#define DSDT_SIG "DSDT" /* Differentiated System Description Table */
#define FADT_SIG "FACP" /* Fixed ACPI Description Table */
#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */
#define PSDT_SIG "PSDT" /* Persistent System Description Table */
+#define RSDP_SIG "RSD PTR " /* Root System Description Pointer */
#define RSDT_SIG "RSDT" /* Root System Description Table */
#define XSDT_SIG "XSDT" /* Extended System Description Table */
#define SSDT_SIG "SSDT" /* Secondary System Description Table */
-#define SBST_SIG "SBST" /* Smart Battery Specification Table */
-#define SPIC_SIG "SPIC" /* IOSAPIC table */
-#define BOOT_SIG "BOOT" /* Boot table */
-
-#define GL_OWNED 0x02 /* Ownership of global lock is bit 1 */
+#define RSDP_NAME "RSDP"
/*
- * Common table types. The base code can remain
- * constant if the underlying tables are changed
+ * All tables and structures must be byte-packed to match the ACPI
+ * specification, since the tables are provided by the system BIOS
*/
-#define RSDT_DESCRIPTOR struct rsdt_descriptor_rev2
-#define XSDT_DESCRIPTOR struct xsdt_descriptor_rev2
-#define FACS_DESCRIPTOR struct facs_descriptor_rev2
-#define FADT_DESCRIPTOR struct fadt_descriptor_rev2
-
#pragma pack(1)
/*
- * ACPI Version-independent tables
+ * These are the ACPI tables that are directly consumed by the subsystem.
+ *
+ * The RSDP and FACS do not use the common ACPI table header. All other ACPI
+ * tables use the header.
*
- * NOTE: The tables that are specific to ACPI versions (1.0, 2.0, etc.)
- * are in separate files.
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
*/
-struct rsdp_descriptor { /* Root System Descriptor Pointer */
- char signature[8]; /* ACPI signature, contains "RSD PTR " */
- u8 checksum; /* ACPI 1.0 checksum */
- char oem_id[6]; /* OEM identification */
- u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */
- u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */
- u32 length; /* XSDT Length in bytes, including header */
- u64 xsdt_physical_address; /* 64-bit physical address of the XSDT */
- u8 extended_checksum; /* Checksum of entire table (ACPI 2.0) */
- char reserved[3]; /* Reserved, must be zero */
-};
-struct acpi_common_facs { /* Common FACS for internal use */
- u32 *global_lock;
- u64 *firmware_waking_vector;
- u8 vector_width;
-};
+/*******************************************************************************
+ *
+ * ACPI Table Header. This common header is used by all tables except the
+ * RSDP and FACS. The define is used for direct inclusion of header into
+ * other ACPI tables
+ *
+ ******************************************************************************/
-#define ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \
+#define ACPI_TABLE_HEADER_DEF \
char signature[4]; /* ASCII table signature */\
u32 length; /* Length of table in bytes, including this header */\
u8 revision; /* ACPI Specification minor version # */\
@@ -112,154 +91,239 @@ struct acpi_common_facs { /* Common FACS for internal use */
char oem_id[6]; /* ASCII OEM identification */\
char oem_table_id[8]; /* ASCII OEM table identification */\
u32 oem_revision; /* OEM revision number */\
- char asl_compiler_id [4]; /* ASCII ASL compiler vendor ID */\
+ char asl_compiler_id[4]; /* ASCII ASL compiler vendor ID */\
u32 asl_compiler_revision; /* ASL compiler version */
-struct acpi_table_header { /* ACPI common table header */
+struct acpi_table_header {
ACPI_TABLE_HEADER_DEF};
/*
- * MADT values and structures
+ * GAS - Generic Address Structure (ACPI 2.0+)
*/
+struct acpi_generic_address {
+ u8 address_space_id; /* Address space where struct or register exists */
+ u8 register_bit_width; /* Size in bits of given register */
+ u8 register_bit_offset; /* Bit offset within the register */
+ u8 access_width; /* Minimum Access size (ACPI 3.0) */
+ u64 address; /* 64-bit address of struct or register */
+};
-/* Values for MADT PCATCompat */
+/*******************************************************************************
+ *
+ * RSDP - Root System Description Pointer (Signature is "RSD PTR ")
+ *
+ ******************************************************************************/
+
+struct rsdp_descriptor {
+ char signature[8]; /* ACPI signature, contains "RSD PTR " */
+ u8 checksum; /* ACPI 1.0 checksum */
+ char oem_id[6]; /* OEM identification */
+ u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */
+ u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */
+ u32 length; /* Table length in bytes, including header (ACPI 2.0+) */
+ u64 xsdt_physical_address; /* 64-bit physical address of the XSDT (ACPI 2.0+) */
+ u8 extended_checksum; /* Checksum of entire table (ACPI 2.0+) */
+ u8 reserved[3]; /* Reserved, must be zero */
+};
-#define DUAL_PIC 0
-#define MULTIPLE_APIC 1
+#define ACPI_RSDP_REV0_SIZE 20 /* Size of original ACPI 1.0 RSDP */
-/* Master MADT */
+/*******************************************************************************
+ *
+ * RSDT/XSDT - Root System Description Tables
+ *
+ ******************************************************************************/
-struct multiple_apic_table {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 local_apic_address; /* Physical address of local APIC */
+struct rsdt_descriptor {
+ ACPI_TABLE_HEADER_DEF u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
+};
+
+struct xsdt_descriptor {
+ ACPI_TABLE_HEADER_DEF u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */
+};
+
+/*******************************************************************************
+ *
+ * FACS - Firmware ACPI Control Structure (FACS)
+ *
+ ******************************************************************************/
+
+struct facs_descriptor {
+ char signature[4]; /* ASCII table signature */
+ u32 length; /* Length of structure, in bytes */
+ u32 hardware_signature; /* Hardware configuration signature */
+ u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */
+ u32 global_lock; /* Global Lock for shared hardware resources */
/* Flags (32 bits) */
- u8 PCATcompat:1; /* 00: System also has dual 8259s */
+ u8 S4bios_f:1; /* 00: S4BIOS support is present */
u8:7; /* 01-07: Reserved, must be zero */
u8 reserved1[3]; /* 08-31: Reserved, must be zero */
-};
-/* Values for Type in APIC_HEADER_DEF */
+ u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */
+ u8 version; /* Version of this table (ACPI 2.0+) */
+ u8 reserved[31]; /* Reserved, must be zero */
+};
-#define APIC_PROCESSOR 0
-#define APIC_IO 1
-#define APIC_XRUPT_OVERRIDE 2
-#define APIC_NMI 3
-#define APIC_LOCAL_NMI 4
-#define APIC_ADDRESS_OVERRIDE 5
-#define APIC_IO_SAPIC 6
-#define APIC_LOCAL_SAPIC 7
-#define APIC_XRUPT_SOURCE 8
-#define APIC_RESERVED 9 /* 9 and greater are reserved */
+#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */
+#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */
/*
- * MADT sub-structures (Follow MULTIPLE_APIC_DESCRIPTION_TABLE)
+ * Common FACS - This is a version-independent FACS structure for internal use only
*/
-#define APIC_HEADER_DEF /* Common APIC sub-structure header */\
- u8 type; \
- u8 length;
-
-struct apic_header {
-APIC_HEADER_DEF};
-
-/* Values for MPS INTI flags */
-
-#define POLARITY_CONFORMS 0
-#define POLARITY_ACTIVE_HIGH 1
-#define POLARITY_RESERVED 2
-#define POLARITY_ACTIVE_LOW 3
-
-#define TRIGGER_CONFORMS 0
-#define TRIGGER_EDGE 1
-#define TRIGGER_RESERVED 2
-#define TRIGGER_LEVEL 3
-
-/* Common flag definitions (16 bits each) */
-
-#define MPS_INTI_FLAGS \
- u8 polarity : 2; /* 00-01: Polarity of APIC I/O input signals */\
- u8 trigger_mode : 2; /* 02-03: Trigger mode of APIC input signals */\
- u8 : 4; /* 04-07: Reserved, must be zero */\
- u8 reserved1; /* 08-15: Reserved, must be zero */
-
-#define LOCAL_APIC_FLAGS \
- u8 processor_enabled: 1; /* 00: Processor is usable if set */\
- u8 : 7; /* 01-07: Reserved, must be zero */\
- u8 reserved2; /* 08-15: Reserved, must be zero */
-
-/* Sub-structures for MADT */
-
-struct madt_processor_apic {
- APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
- u8 local_apic_id; /* Processor's local APIC id */
- LOCAL_APIC_FLAGS};
-
-struct madt_io_apic {
- APIC_HEADER_DEF u8 io_apic_id; /* I/O APIC ID */
- u8 reserved; /* Reserved - must be zero */
- u32 address; /* APIC physical address */
- u32 interrupt; /* Global system interrupt where INTI
- * lines start */
+struct acpi_common_facs {
+ u32 *global_lock;
+ u64 *firmware_waking_vector;
+ u8 vector_width;
};
-struct madt_interrupt_override {
- APIC_HEADER_DEF u8 bus; /* 0 - ISA */
- u8 source; /* Interrupt source (IRQ) */
- u32 interrupt; /* Global system interrupt */
- MPS_INTI_FLAGS};
+/*******************************************************************************
+ *
+ * FADT - Fixed ACPI Description Table (Signature "FACP")
+ *
+ ******************************************************************************/
+
+/* Fields common to all versions of the FADT */
+
+#define ACPI_FADT_COMMON \
+ ACPI_TABLE_HEADER_DEF \
+ u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \
+ u32 V1_dsdt; /* 32-bit physical address of DSDT */ \
+ u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \
+ u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \
+ u16 sci_int; /* System vector of SCI interrupt */ \
+ u32 smi_cmd; /* Port address of SMI command port */ \
+ u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \
+ u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \
+ u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \
+ u8 pstate_cnt; /* Processor performance state control*/ \
+ u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a Event Reg Blk */ \
+ u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b Event Reg Blk */ \
+ u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \
+ u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \
+ u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \
+ u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \
+ u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \
+ u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \
+ u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */ \
+ u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */ \
+ u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \
+ u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \
+ u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \
+ u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \
+ u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \
+ u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \
+ u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \
+ u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \
+ u16 flush_size; /* Processor's memory cache line width, in bytes */ \
+ u16 flush_stride; /* Number of flush strides that need to be read */ \
+ u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \
+ u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ \
+ u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \
+ u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \
+ u8 century; /* Index to century in RTC CMOS RAM */ \
+ u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/ \
+ u8 reserved2; /* Reserved, must be zero */
-struct madt_nmi_source {
- APIC_HEADER_DEF MPS_INTI_FLAGS u32 interrupt; /* Global system interrupt */
+/*
+ * ACPI 2.0+ FADT
+ */
+struct fadt_descriptor {
+ ACPI_FADT_COMMON
+ /* Flags (32 bits) */
+ u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
+ u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
+ u8 proc_c1:1; /* 02: All processors support C1 state */
+ u8 plvl2_up:1; /* 03: C2 state works on MP system */
+ u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
+ u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
+ u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
+ u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
+ u8 tmr_val_ext:1; /* 08: tmr_val is 32 bits 0=24-bits */
+ u8 dock_cap:1; /* 09: Docking supported */
+ u8 reset_reg_sup:1; /* 10: System reset via the FADT RESET_REG supported */
+ u8 sealed_case:1; /* 11: No internal expansion capabilities and case is sealed */
+ u8 headless:1; /* 12: No local video capabilities or local input devices */
+ u8 cpu_sw_sleep:1; /* 13: Must execute native instruction after writing SLP_TYPx register */
+
+ u8 pci_exp_wak:1; /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
+ u8 use_platform_clock:1; /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
+ u8 S4rtc_sts_valid:1; /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
+ u8 remote_power_on_capable:1; /* 17: System is compatible with remote power on (ACPI 3.0) */
+ u8 force_apic_cluster_model:1; /* 18: All local APICs must use cluster model (ACPI 3.0) */
+ u8 force_apic_physical_destination_mode:1; /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
+ u8:4; /* 20-23: Reserved, must be zero */
+ u8 reserved3; /* 24-31: Reserved, must be zero */
+
+ struct acpi_generic_address reset_register; /* Reset register address in GAS format */
+ u8 reset_value; /* Value to write to the reset_register port to reset the system */
+ u8 reserved4[3]; /* These three bytes must be zero */
+ u64 xfirmware_ctrl; /* 64-bit physical address of FACS */
+ u64 Xdsdt; /* 64-bit physical address of DSDT */
+ struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */
+ struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */
+ struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */
+ struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */
+ struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */
+ struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */
+ struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */
+ struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */
};
-struct madt_local_apic_nmi {
- APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
- MPS_INTI_FLAGS u8 lint; /* LINTn to which NMI is connected */
+/*
+ * "Down-revved" ACPI 2.0 FADT descriptor
+ * Defined here to allow compiler to generate the length of the struct
+ */
+struct fadt_descriptor_rev2_minus {
+ ACPI_FADT_COMMON u32 flags;
+ struct acpi_generic_address reset_register; /* Reset register address in GAS format */
+ u8 reset_value; /* Value to write to the reset_register port to reset the system. */
+ u8 reserved7[3]; /* Reserved, must be zero */
};
-struct madt_address_override {
- APIC_HEADER_DEF u16 reserved; /* Reserved, must be zero */
- u64 address; /* APIC physical address */
+/*
+ * ACPI 1.0 FADT
+ * Defined here to allow compiler to generate the length of the struct
+ */
+struct fadt_descriptor_rev1 {
+ ACPI_FADT_COMMON u32 flags;
};
-struct madt_io_sapic {
- APIC_HEADER_DEF u8 io_sapic_id; /* I/O SAPIC ID */
- u8 reserved; /* Reserved, must be zero */
- u32 interrupt_base; /* Glocal interrupt for SAPIC start */
- u64 address; /* SAPIC physical address */
-};
+/* FADT: Preferred Power Management Profiles */
-struct madt_local_sapic {
- APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
- u8 local_sapic_id; /* SAPIC ID */
- u8 local_sapic_eid; /* SAPIC EID */
- u8 reserved[3]; /* Reserved, must be zero */
- LOCAL_APIC_FLAGS u32 processor_uID; /* Numeric UID - ACPI 3.0 */
- char processor_uIDstring[1]; /* String UID - ACPI 3.0 */
-};
+#define PM_UNSPECIFIED 0
+#define PM_DESKTOP 1
+#define PM_MOBILE 2
+#define PM_WORKSTATION 3
+#define PM_ENTERPRISE_SERVER 4
+#define PM_SOHO_SERVER 5
+#define PM_APPLIANCE_PC 6
-struct madt_interrupt_source {
- APIC_HEADER_DEF MPS_INTI_FLAGS u8 interrupt_type; /* 1=PMI, 2=INIT, 3=corrected */
- u8 processor_id; /* Processor ID */
- u8 processor_eid; /* Processor EID */
- u8 io_sapic_vector; /* Vector value for PMI interrupts */
- u32 interrupt; /* Global system interrupt */
- u32 flags; /* Interrupt Source Flags */
-};
+/* FADT: Boot Arch Flags */
-/*
- * Smart Battery
- */
-struct smart_battery_table {
- ACPI_TABLE_HEADER_DEF u32 warning_level;
- u32 low_level;
- u32 critical_level;
-};
+#define BAF_LEGACY_DEVICES 0x0001
+#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
+
+#define FADT2_REVISION_ID 3
+#define FADT2_MINUS_REVISION_ID 2
+
+/* Reset to default packing */
#pragma pack()
/*
+ * This macro is temporary until the table bitfield flag definitions
+ * are removed and replaced by a Flags field.
+ */
+#define ACPI_FLAG_OFFSET(d,f,o) (u8) (ACPI_OFFSET (d,f) + \
+ sizeof(((d *)0)->f) + o)
+/*
+ * Get the remaining ACPI tables
+ */
+#include "actbl1.h"
+
+/*
* ACPI Table information. We save the table address, length,
* and type of memory allocation (mapped or allocated) for each
* table for 1) when we exit, and 2) if a new table is installed
@@ -290,27 +354,17 @@ struct acpi_table_support {
u8 flags;
};
-/*
- * Get the ACPI version-specific tables
- */
-#include "actbl1.h" /* Acpi 1.0 table definitions */
-#include "actbl2.h" /* Acpi 2.0 table definitions */
-
extern u8 acpi_fadt_is_v1; /* is set to 1 if FADT is revision 1,
* needed for certain workarounds */
+/* Macros used to generate offsets to specific table fields */
-#pragma pack(1)
-/*
- * High performance timer
- */
-struct hpet_table {
- ACPI_TABLE_HEADER_DEF u32 hardware_id;
- struct acpi_generic_address base_address;
- u8 hpet_number;
- u16 clock_tick;
- u8 attributes;
-};
+#define ACPI_FACS_OFFSET(f) (u8) ACPI_OFFSET (struct facs_descriptor,f)
+#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct fadt_descriptor, f)
+#define ACPI_GAS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_generic_address,f)
+#define ACPI_HDR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_header,f)
+#define ACPI_RSDP_OFFSET(f) (u8) ACPI_OFFSET (struct rsdp_descriptor,f)
-#pragma pack()
+#define ACPI_FADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct fadt_descriptor,f,o)
+#define ACPI_FACS_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct facs_descriptor,f,o)
#endif /* __ACTBL_H__ */
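
The rewritten actbl.h keeps struct rsdp_descriptor and adds ACPI_RSDP_REV0_SIZE (20 bytes, the size of the original ACPI 1.0 RSDP). A typical consumer-side validation checks the "RSD PTR " signature, checksums the first 20 bytes, and for revision 2 or later also checksums 'length' bytes against the extended checksum. The sketch below re-declares the structure locally so it stands alone; validate_rsdp() is an illustrative helper, not an ACPICA entry point.

#include <stdint.h>
#include <string.h>

#define RSDP_SIGNATURE	"RSD PTR "
#define RSDP_REV0_SIZE	20	/* Same value as ACPI_RSDP_REV0_SIZE above */

#pragma pack(1)
struct rsdp {				/* Local copy of the layout in actbl.h */
	char signature[8];
	uint8_t checksum;		/* Covers the first 20 bytes */
	char oem_id[6];
	uint8_t revision;		/* 0 for ACPI 1.0, 2 for ACPI 2.0+ */
	uint32_t rsdt_physical_address;
	uint32_t length;		/* Whole-table length (ACPI 2.0+) */
	uint64_t xsdt_physical_address;
	uint8_t extended_checksum;	/* Covers 'length' bytes (ACPI 2.0+) */
	uint8_t reserved[3];
};
#pragma pack()

static uint8_t byte_sum(const void *buf, uint32_t len)
{
	const uint8_t *p = buf;
	uint8_t sum = 0;

	while (len--)
		sum = (uint8_t)(sum + *p++);
	return sum;
}

/* Returns 0 if the RSDP looks valid; caller must have mapped 'length' bytes */
static int validate_rsdp(const struct rsdp *r)
{
	if (memcmp(r->signature, RSDP_SIGNATURE, 8) != 0)
		return 1;
	if (byte_sum(r, RSDP_REV0_SIZE) != 0)
		return 2;	/* ACPI 1.0 checksum failed */
	if (r->revision >= 2 && byte_sum(r, r->length) != 0)
		return 3;	/* Extended checksum failed */
	return 0;
}

int main(void)
{
	struct rsdp r;

	memset(&r, 0, sizeof(r));
	memcpy(r.signature, RSDP_SIGNATURE, 8);
	r.revision = 0;			/* ACPI 1.0 style RSDP */
	r.checksum = (uint8_t)(0 - byte_sum(&r, RSDP_REV0_SIZE));

	return validate_rsdp(&r);	/* 0 on success */
}
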
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index cd428d57add0..745a6445a4f9 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Name: actbl1.h - ACPI 1.0 tables
+ * Name: actbl1.h - Additional ACPI table definitions
*
*****************************************************************************/
@@ -44,92 +44,599 @@
#ifndef __ACTBL1_H__
#define __ACTBL1_H__
+/*******************************************************************************
+ *
+ * Additional ACPI Tables
+ *
+ * These tables are not consumed directly by the ACPICA subsystem, but are
+ * included here to support device drivers and the AML disassembler.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures. Useful because they make
+ * it more difficult to inadvertently type in the wrong signature.
+ */
+#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
+#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
+#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
+#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
+#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
+#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
+#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
+#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
+#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
+#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
+#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
+
+/* Legacy names */
+
+#define APIC_SIG "APIC" /* Multiple APIC Description Table */
+#define BOOT_SIG "BOOT" /* Simple Boot Flag Table */
+#define SBST_SIG "SBST" /* Smart Battery Specification Table */
+
+/*
+ * All tables must be byte-packed to match the ACPI specification, since
+ * the tables are provided by the system BIOS.
+ */
#pragma pack(1)
/*
- * ACPI 1.0 Root System Description Table (RSDT)
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
*/
-struct rsdt_descriptor_rev1 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
-};
+
+/*******************************************************************************
+ *
+ * ASF - Alert Standard Format table (Signature "ASF!")
+ *
+ ******************************************************************************/
+
+struct acpi_table_asf {
+ACPI_TABLE_HEADER_DEF};
+
+#define ACPI_ASF_HEADER_DEF \
+ u8 type; \
+ u8 reserved; \
+ u16 length;
+
+struct acpi_asf_header {
+ACPI_ASF_HEADER_DEF};
+
+/* Values for Type field */
+
+#define ASF_INFO 0
+#define ASF_ALERT 1
+#define ASF_CONTROL 2
+#define ASF_BOOT 3
+#define ASF_ADDRESS 4
+#define ASF_RESERVED 5
/*
- * ACPI 1.0 Firmware ACPI Control Structure (FACS)
+ * ASF subtables
*/
-struct facs_descriptor_rev1 {
- char signature[4]; /* ASCII table signature */
- u32 length; /* Length of structure in bytes */
- u32 hardware_signature; /* Hardware configuration signature */
- u32 firmware_waking_vector; /* ACPI OS waking vector */
- u32 global_lock; /* Global Lock */
+
+/* 0: ASF Information */
+
+struct acpi_asf_info {
+ ACPI_ASF_HEADER_DEF u8 min_reset_value;
+ u8 min_poll_interval;
+ u16 system_id;
+ u32 mfg_id;
+ u8 flags;
+ u8 reserved2[3];
+};
+
+/* 1: ASF Alerts */
+
+struct acpi_asf_alert {
+ ACPI_ASF_HEADER_DEF u8 assert_mask;
+ u8 deassert_mask;
+ u8 alerts;
+ u8 data_length;
+ u8 array[1];
+};
+
+/* 2: ASF Remote Control */
+
+struct acpi_asf_remote {
+ ACPI_ASF_HEADER_DEF u8 controls;
+ u8 data_length;
+ u16 reserved2;
+ u8 array[1];
+};
+
+/* 3: ASF RMCP Boot Options */
+
+struct acpi_asf_rmcp {
+ ACPI_ASF_HEADER_DEF u8 capabilities[7];
+ u8 completion_code;
+ u32 enterprise_id;
+ u8 command;
+ u16 parameter;
+ u16 boot_options;
+ u16 oem_parameters;
+};
+
+/* 4: ASF Address */
+
+struct acpi_asf_address {
+ ACPI_ASF_HEADER_DEF u8 eprom_address;
+ u8 devices;
+ u8 smbus_addresses[1];
+};
+
+/*******************************************************************************
+ *
+ * BOOT - Simple Boot Flag Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_boot {
+ ACPI_TABLE_HEADER_DEF u8 cmos_index; /* Index in CMOS RAM for the boot register */
+ u8 reserved[3];
+};
+
+/*******************************************************************************
+ *
+ * CPEP - Corrected Platform Error Polling table
+ *
+ ******************************************************************************/
+
+struct acpi_table_cpep {
+ ACPI_TABLE_HEADER_DEF u64 reserved;
+};
+
+/* Subtable */
+
+struct acpi_cpep_polling {
+ u8 type;
+ u8 length;
+ u8 processor_id; /* Processor ID */
+ u8 processor_eid; /* Processor EID */
+ u32 polling_interval; /* Polling interval (msec) */
+};
+
+/*******************************************************************************
+ *
+ * DBGP - Debug Port table
+ *
+ ******************************************************************************/
+
+struct acpi_table_dbgp {
+ ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address debug_port;
+};
+
+/*******************************************************************************
+ *
+ * ECDT - Embedded Controller Boot Resources Table
+ *
+ ******************************************************************************/
+
+struct ec_boot_resources {
+ ACPI_TABLE_HEADER_DEF struct acpi_generic_address ec_control; /* Address of EC command/status register */
+ struct acpi_generic_address ec_data; /* Address of EC data register */
+ u32 uid; /* Unique ID - must be same as the EC _UID method */
+ u8 gpe_bit; /* The GPE for the EC */
+ u8 ec_id[1]; /* Full namepath of the EC in the ACPI namespace */
+};
+
+/*******************************************************************************
+ *
+ * HPET - High Precision Event Timer table
+ *
+ ******************************************************************************/
+
+struct acpi_hpet_table {
+ ACPI_TABLE_HEADER_DEF u32 hardware_id; /* Hardware ID of event timer block */
+ struct acpi_generic_address base_address; /* Address of event timer block */
+ u8 hpet_number; /* HPET sequence number */
+ u16 clock_tick; /* Main counter min tick, periodic mode */
+ u8 attributes;
+};
+
+#if 0 /* HPET flags to be converted to macros */
+struct { /* Flags (8 bits) */
+ u8 page_protect:1; /* 00: No page protection */
+ u8 page_protect4:1; /* 01: 4_kB page protected */
+ u8 page_protect64:1; /* 02: 64_kB page protected */
+ u8:5; /* 03-07: Reserved, must be zero */
+} flags;
+#endif
+
+/*******************************************************************************
+ *
+ * MADT - Multiple APIC Description Table
+ *
+ ******************************************************************************/
+
+struct multiple_apic_table {
+ ACPI_TABLE_HEADER_DEF u32 local_apic_address; /* Physical address of local APIC */
/* Flags (32 bits) */
- u8 S4bios_f:1; /* 00: S4BIOS support is present */
+ u8 PCATcompat:1; /* 00: System also has dual 8259s */
u8:7; /* 01-07: Reserved, must be zero */
u8 reserved1[3]; /* 08-31: Reserved, must be zero */
-
- u8 reserved2[40]; /* Reserved, must be zero */
};
+/* Values for MADT PCATCompat */
+
+#define DUAL_PIC 0
+#define MULTIPLE_APIC 1
+
+/* Common MADT Sub-table header */
+
+#define APIC_HEADER_DEF \
+ u8 type; \
+ u8 length;
+
+struct apic_header {
+APIC_HEADER_DEF};
+
+/* Values for Type in struct apic_header */
+
+#define APIC_PROCESSOR 0
+#define APIC_IO 1
+#define APIC_XRUPT_OVERRIDE 2
+#define APIC_NMI 3
+#define APIC_LOCAL_NMI 4
+#define APIC_ADDRESS_OVERRIDE 5
+#define APIC_IO_SAPIC 6
+#define APIC_LOCAL_SAPIC 7
+#define APIC_XRUPT_SOURCE 8
+#define APIC_RESERVED 9 /* 9 and greater are reserved */
+
+/* Flag definitions for MADT sub-tables */
+
+#define ACPI_MADT_IFLAGS /* INTI flags (16 bits) */ \
+ u8 polarity : 2; /* 00-01: Polarity of APIC I/O input signals */\
+ u8 trigger_mode : 2; /* 02-03: Trigger mode of APIC input signals */\
+ u8 : 4; /* 04-07: Reserved, must be zero */\
+ u8 reserved1; /* 08-15: Reserved, must be zero */
+
+#define ACPI_MADT_LFLAGS /* Local Sapic flags (32 bits) */ \
+ u8 processor_enabled: 1; /* 00: Processor is usable if set */\
+ u8 : 7; /* 01-07: Reserved, must be zero */\
+ u8 reserved2[3]; /* 08-31: Reserved, must be zero */
+
+/* Values for MPS INTI flags */
+
+#define POLARITY_CONFORMS 0
+#define POLARITY_ACTIVE_HIGH 1
+#define POLARITY_RESERVED 2
+#define POLARITY_ACTIVE_LOW 3
+
+#define TRIGGER_CONFORMS 0
+#define TRIGGER_EDGE 1
+#define TRIGGER_RESERVED 2
+#define TRIGGER_LEVEL 3
+
/*
- * ACPI 1.0 Fixed ACPI Description Table (FADT)
+ * MADT Sub-tables, correspond to Type in struct apic_header
*/
-struct fadt_descriptor_rev1 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 firmware_ctrl; /* Physical address of FACS */
- u32 dsdt; /* Physical address of DSDT */
- u8 model; /* System Interrupt Model */
- u8 reserved1; /* Reserved, must be zero */
- u16 sci_int; /* System vector of SCI interrupt */
- u32 smi_cmd; /* Port address of SMI command port */
- u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */
- u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */
- u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
- u8 reserved2; /* Reserved, must be zero */
- u32 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
- u32 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
- u32 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
- u32 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
- u32 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
- u32 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
- u32 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
- u32 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
- u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */
- u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
- u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
- u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
- u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
- u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
- u8 gpe1_base; /* Offset in gpe model where gpe1 events start */
- u8 reserved3; /* Reserved, must be zero */
- u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */
- u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */
- u16 flush_size; /* Size of area read to flush caches */
- u16 flush_stride; /* Stride used in flushing caches */
- u8 duty_offset; /* Bit location of duty cycle field in p_cnt reg */
- u8 duty_width; /* Bit width of duty cycle field in p_cnt reg */
- u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */
- u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */
- u8 century; /* Index to century in RTC CMOS RAM */
- u8 reserved4[3]; /* Reserved, must be zero */
+
+/* 0: processor APIC */
+
+struct madt_processor_apic {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ u8 local_apic_id; /* Processor's local APIC id */
+ ACPI_MADT_LFLAGS};
+
+/* 1: IO APIC */
+
+struct madt_io_apic {
+ APIC_HEADER_DEF u8 io_apic_id; /* I/O APIC ID */
+ u8 reserved; /* Reserved - must be zero */
+ u32 address; /* APIC physical address */
+ u32 interrupt; /* Global system interrupt where INTI lines start */
+};
+
+/* 2: Interrupt Override */
+
+struct madt_interrupt_override {
+ APIC_HEADER_DEF u8 bus; /* 0 - ISA */
+ u8 source; /* Interrupt source (IRQ) */
+ u32 interrupt; /* Global system interrupt */
+ ACPI_MADT_IFLAGS};
+
+/* 3: NMI Sources */
+
+struct madt_nmi_source {
+ APIC_HEADER_DEF ACPI_MADT_IFLAGS u32 interrupt; /* Global system interrupt */
+};
+
+/* 4: Local APIC NMI */
+
+struct madt_local_apic_nmi {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ ACPI_MADT_IFLAGS u8 lint; /* LINTn to which NMI is connected */
+};
+
+/* 5: Address Override */
+
+struct madt_address_override {
+ APIC_HEADER_DEF u16 reserved; /* Reserved, must be zero */
+ u64 address; /* APIC physical address */
+};
+
+/* 6: I/O Sapic */
+
+struct madt_io_sapic {
+ APIC_HEADER_DEF u8 io_sapic_id; /* I/O SAPIC ID */
+ u8 reserved; /* Reserved, must be zero */
+ u32 interrupt_base; /* Global interrupt for SAPIC start */
+ u64 address; /* SAPIC physical address */
+};
+
+/* 7: Local Sapic */
+
+struct madt_local_sapic {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ u8 local_sapic_id; /* SAPIC ID */
+ u8 local_sapic_eid; /* SAPIC EID */
+ u8 reserved[3]; /* Reserved, must be zero */
+ ACPI_MADT_LFLAGS u32 processor_uID; /* Numeric UID - ACPI 3.0 */
+ char processor_uIDstring[1]; /* String UID - ACPI 3.0 */
+};
+
+/* 8: Platform Interrupt Source */
+
+struct madt_interrupt_source {
+ APIC_HEADER_DEF ACPI_MADT_IFLAGS u8 interrupt_type; /* 1=PMI, 2=INIT, 3=corrected */
+ u8 processor_id; /* Processor ID */
+ u8 processor_eid; /* Processor EID */
+ u8 io_sapic_vector; /* Vector value for PMI interrupts */
+ u32 interrupt; /* Global system interrupt */
+ u32 flags; /* Interrupt Source Flags */
+};
+
+#ifdef DUPLICATE_DEFINITION_WITH_LINUX_ACPI_H
+/*******************************************************************************
+ *
+ * MCFG - PCI Memory Mapped Configuration table and sub-table
+ *
+ ******************************************************************************/
+
+struct acpi_table_mcfg {
+ ACPI_TABLE_HEADER_DEF u8 reserved[8];
+};
+
+struct acpi_mcfg_allocation {
+ u64 base_address; /* Base address, processor-relative */
+ u16 pci_segment; /* PCI segment group number */
+ u8 start_bus_number; /* Starting PCI Bus number */
+ u8 end_bus_number; /* Final PCI Bus number */
+ u32 reserved;
+};
+#endif
+
+/*******************************************************************************
+ *
+ * SBST - Smart Battery Specification Table
+ *
+ ******************************************************************************/
+
+struct smart_battery_table {
+ ACPI_TABLE_HEADER_DEF u32 warning_level;
+ u32 low_level;
+ u32 critical_level;
+};
+
+/*******************************************************************************
+ *
+ * SLIT - System Locality Distance Information Table
+ *
+ ******************************************************************************/
+
+struct system_locality_info {
+ ACPI_TABLE_HEADER_DEF u64 locality_count;
+ u8 entry[1][1];
+};
+
+/*******************************************************************************
+ *
+ * SPCR - Serial Port Console Redirection table
+ *
+ ******************************************************************************/
+
+struct acpi_table_spcr {
+ ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address serial_port;
+ u8 interrupt_type;
+ u8 pc_interrupt;
+ u32 interrupt;
+ u8 baud_rate;
+ u8 parity;
+ u8 stop_bits;
+ u8 flow_control;
+ u8 terminal_type;
+ u8 reserved2;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u32 pci_flags;
+ u8 pci_segment;
+ u32 reserved3;
+};
+
+/*******************************************************************************
+ *
+ * SPMI - Server Platform Management Interface table
+ *
+ ******************************************************************************/
+
+struct acpi_table_spmi {
+ ACPI_TABLE_HEADER_DEF u8 reserved;
+ u8 interface_type;
+ u16 spec_revision; /* Version of IPMI */
+ u8 interrupt_type;
+ u8 gpe_number; /* GPE assigned */
+ u8 reserved2;
+ u8 pci_device_flag;
+ u32 interrupt;
+ struct acpi_generic_address ipmi_register;
+ u8 pci_segment;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+};
+
+/*******************************************************************************
+ *
+ * SRAT - System Resource Affinity Table
+ *
+ ******************************************************************************/
+
+struct system_resource_affinity {
+ ACPI_TABLE_HEADER_DEF u32 reserved1; /* Must be value '1' */
+ u64 reserved2; /* Reserved, must be zero */
+};
+
+/* SRAT common sub-table header */
+
+#define SRAT_SUBTABLE_HEADER \
+ u8 type; \
+ u8 length;
+
+/* Values for Type above */
+
+#define SRAT_CPU_AFFINITY 0
+#define SRAT_MEMORY_AFFINITY 1
+#define SRAT_RESERVED 2
+
+/* SRAT sub-tables */
+
+struct static_resource_alloc {
+ SRAT_SUBTABLE_HEADER u8 proximity_domain_lo;
+ u8 apic_id;
+
+ /* Flags (32 bits) */
+
+ u8 enabled:1; /* 00: Use affinity structure */
+ u8:7; /* 01-07: Reserved, must be zero */
+ u8 reserved3[3]; /* 08-31: Reserved, must be zero */
+
+ u8 local_sapic_eid;
+ u8 proximity_domain_hi[3];
+ u32 reserved4; /* Reserved, must be zero */
+};
+
+struct memory_affinity {
+ SRAT_SUBTABLE_HEADER u32 proximity_domain;
+ u16 reserved3;
+ u64 base_address;
+ u64 address_length;
+ u32 reserved4;
/* Flags (32 bits) */
- u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
- u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
- u8 proc_c1:1; /* 02: All processors support C1 state */
- u8 plvl2_up:1; /* 03: C2 state works on MP system */
- u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
- u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
- u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
- u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
- u8 tmr_val_ext:1; /* 08: tmr_val width is 32 bits (0 = 24 bits) */
- u8:7; /* 09-15: Reserved, must be zero */
- u8 reserved5[2]; /* 16-31: Reserved, must be zero */
+ u8 enabled:1; /* 00: Use affinity structure */
+ u8 hot_pluggable:1; /* 01: Memory region is hot pluggable */
+ u8 non_volatile:1; /* 02: Memory is non-volatile */
+ u8:5; /* 03-07: Reserved, must be zero */
+ u8 reserved5[3]; /* 08-31: Reserved, must be zero */
+
+ u64 reserved6; /* Reserved, must be zero */
+};
+
+/*******************************************************************************
+ *
+ * TCPA - Trusted Computing Platform Alliance table
+ *
+ ******************************************************************************/
+
+struct acpi_table_tcpa {
+ ACPI_TABLE_HEADER_DEF u16 reserved;
+ u32 max_log_length; /* Maximum length for the event log area */
+ u64 log_address; /* Address of the event log area */
};
+/*******************************************************************************
+ *
+ * WDRT - Watchdog Resource Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_wdrt {
+ ACPI_TABLE_HEADER_DEF u32 header_length; /* Watchdog Header Length */
+ u8 pci_segment; /* PCI Segment number */
+ u8 pci_bus; /* PCI Bus number */
+ u8 pci_device; /* PCI Device number */
+ u8 pci_function; /* PCI Function number */
+ u32 timer_period; /* Period of one timer count (msec) */
+ u32 max_count; /* Maximum counter value supported */
+ u32 min_count; /* Minimum counter value */
+ u8 flags;
+ u8 reserved[3];
+ u32 entries; /* Number of watchdog entries that follow */
+};
+
+#if 0 /* Flags, will be converted to macros */
+u8 enabled:1; /* 00: Timer enabled */
+u8:6; /* 01-06: Reserved */
+u8 sleep_stop:1; /* 07: Timer stopped in sleep state */
+#endif
+
+/* Macros used to generate offsets to specific table fields */
+
+#define ACPI_ASF0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_info,f)
+#define ACPI_ASF1_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_alert,f)
+#define ACPI_ASF2_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_remote,f)
+#define ACPI_ASF3_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_rmcp,f)
+#define ACPI_ASF4_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_address,f)
+#define ACPI_BOOT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_boot,f)
+#define ACPI_CPEP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_cpep,f)
+#define ACPI_CPEP0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_cpep_polling,f)
+#define ACPI_DBGP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_dbgp,f)
+#define ACPI_ECDT_OFFSET(f) (u8) ACPI_OFFSET (struct ec_boot_resources,f)
+#define ACPI_HPET_OFFSET(f) (u8) ACPI_OFFSET (struct hpet_table,f)
+#define ACPI_MADT_OFFSET(f) (u8) ACPI_OFFSET (struct multiple_apic_table,f)
+#define ACPI_MADT0_OFFSET(f) (u8) ACPI_OFFSET (struct madt_processor_apic,f)
+#define ACPI_MADT1_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_apic,f)
+#define ACPI_MADT2_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_override,f)
+#define ACPI_MADT3_OFFSET(f) (u8) ACPI_OFFSET (struct madt_nmi_source,f)
+#define ACPI_MADT4_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_apic_nmi,f)
+#define ACPI_MADT5_OFFSET(f) (u8) ACPI_OFFSET (struct madt_address_override,f)
+#define ACPI_MADT6_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_sapic,f)
+#define ACPI_MADT7_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_sapic,f)
+#define ACPI_MADT8_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_source,f)
+#define ACPI_MADTH_OFFSET(f) (u8) ACPI_OFFSET (struct apic_header,f)
+#define ACPI_MCFG_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_mcfg,f)
+#define ACPI_MCFG0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_mcfg_allocation,f)
+#define ACPI_SBST_OFFSET(f) (u8) ACPI_OFFSET (struct smart_battery_table,f)
+#define ACPI_SLIT_OFFSET(f) (u8) ACPI_OFFSET (struct system_locality_info,f)
+#define ACPI_SPCR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spcr,f)
+#define ACPI_SPMI_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spmi,f)
+#define ACPI_SRAT_OFFSET(f) (u8) ACPI_OFFSET (struct system_resource_affinity,f)
+#define ACPI_SRAT0_OFFSET(f) (u8) ACPI_OFFSET (struct static_resource_alloc,f)
+#define ACPI_SRAT1_OFFSET(f) (u8) ACPI_OFFSET (struct memory_affinity,f)
+#define ACPI_TCPA_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_tcpa,f)
+#define ACPI_WDRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_wdrt,f)
+
+#define ACPI_HPET_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct hpet_table,f,o)
+#define ACPI_SRAT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct static_resource_alloc,f,o)
+#define ACPI_SRAT1_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct memory_affinity,f,o)
+#define ACPI_MADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct multiple_apic_table,f,o)
+#define ACPI_MADT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_processor_apic,f,o)
+#define ACPI_MADT2_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_override,f,o)
+#define ACPI_MADT3_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_nmi_source,f,o)
+#define ACPI_MADT4_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_apic_nmi,f,o)
+#define ACPI_MADT7_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_sapic,f,o)
+#define ACPI_MADT8_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_source,f,o)
+
+/* Reset to default packing */
+
#pragma pack()
#endif /* __ACTBL1_H__ */
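
actbl1.h now ends with a block of ACPI_xxx_OFFSET macros that reduce a field's byte offset in a packed table to a u8. With #pragma pack(1) in effect these are essentially offsetof() wrappers over known layouts. A small sketch of the pattern follows; struct boot_table and MY_BOOT_OFFSET are illustrative stand-ins that mirror the BOOT table shape shown above (36-byte common header followed by cmos_index).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
struct boot_table {			/* Illustrative copy of the BOOT table layout */
	char signature[4];		/* Common 36-byte ACPI table header... */
	uint32_t length;
	uint8_t revision;
	uint8_t checksum;
	char oem_id[6];
	char oem_table_id[8];
	uint32_t oem_revision;
	char asl_compiler_id[4];
	uint32_t asl_compiler_revision;
	uint8_t cmos_index;		/* ...then the BOOT body: CMOS RAM index */
	uint8_t reserved[3];
};
#pragma pack()

/* Same shape as the ACPI_xxx_OFFSET macros: a field offset reduced to u8 */
#define MY_BOOT_OFFSET(f) ((uint8_t) offsetof(struct boot_table, f))

int main(void)
{
	/* The packed common header is 36 bytes, so cmos_index lands at offset 36 */
	printf("cmos_index offset = %u\n", (unsigned)MY_BOOT_OFFSET(cmos_index));
	return 0;
}
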
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index dfc7ac1094bb..67efe6cad27b 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -44,234 +44,6 @@
#ifndef __ACTBL2_H__
#define __ACTBL2_H__
-/*
- * Prefered Power Management Profiles
- */
-#define PM_UNSPECIFIED 0
-#define PM_DESKTOP 1
-#define PM_MOBILE 2
-#define PM_WORKSTATION 3
-#define PM_ENTERPRISE_SERVER 4
-#define PM_SOHO_SERVER 5
-#define PM_APPLIANCE_PC 6
-
-/*
- * ACPI Boot Arch Flags
- */
-#define BAF_LEGACY_DEVICES 0x0001
-#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
-
-#define FADT2_REVISION_ID 3
-#define FADT2_MINUS_REVISION_ID 2
-
-#pragma pack(1)
-
-/*
- * ACPI 2.0 Root System Description Table (RSDT)
- */
-struct rsdt_descriptor_rev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
-};
-
-/*
- * ACPI 2.0 Extended System Description Table (XSDT)
- */
-struct xsdt_descriptor_rev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */
-};
-
-/*
- * ACPI 2.0 Firmware ACPI Control Structure (FACS)
- */
-struct facs_descriptor_rev2 {
- char signature[4]; /* ASCII table signature */
- u32 length; /* Length of structure, in bytes */
- u32 hardware_signature; /* Hardware configuration signature */
- u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector. */
- u32 global_lock; /* Global Lock used to synchronize access to shared hardware resources */
-
- /* Flags (32 bits) */
-
- u8 S4bios_f:1; /* 00: S4BIOS support is present */
- u8:7; /* 01-07: Reserved, must be zero */
- u8 reserved1[3]; /* 08-31: Reserved, must be zero */
-
- u64 xfirmware_waking_vector; /* 64-bit physical address of the Firmware Waking Vector. */
- u8 version; /* Version of this table */
- u8 reserved3[31]; /* Reserved, must be zero */
-};
-
-/*
- * ACPI 2.0+ Generic Address Structure (GAS)
- */
-struct acpi_generic_address {
- u8 address_space_id; /* Address space where struct or register exists. */
- u8 register_bit_width; /* Size in bits of given register */
- u8 register_bit_offset; /* Bit offset within the register */
- u8 access_width; /* Minimum Access size (ACPI 3.0) */
- u64 address; /* 64-bit address of struct or register */
-};
-
-#define FADT_REV2_COMMON \
- u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \
- u32 V1_dsdt; /* 32-bit physical address of DSDT */ \
- u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \
- u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \
- u16 sci_int; /* System vector of SCI interrupt */ \
- u32 smi_cmd; /* Port address of SMI command port */ \
- u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \
- u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \
- u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \
- u8 pstate_cnt; /* Processor performance state control*/ \
- u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */ \
- u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */ \
- u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \
- u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \
- u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \
- u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \
- u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \
- u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \
- u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */ \
- u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */ \
- u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \
- u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \
- u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \
- u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \
- u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \
- u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \
- u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \
- u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \
- u16 flush_size; /* Number of flush strides that need to be read */ \
- u16 flush_stride; /* Processor's memory cache line width, in bytes */ \
- u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \
- u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ \
- u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \
- u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \
- u8 century; /* Index to century in RTC CMOS RAM */ \
- u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/
-
-/*
- * ACPI 2.0+ Fixed ACPI Description Table (FADT)
- */
-struct fadt_descriptor_rev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- FADT_REV2_COMMON u8 reserved2; /* Reserved, must be zero */
-
- /* Flags (32 bits) */
-
- u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
- u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
- u8 proc_c1:1; /* 02: All processors support C1 state */
- u8 plvl2_up:1; /* 03: C2 state works on MP system */
- u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
- u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
- u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
- u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
- u8 tmr_val_ext:1; /* 08: tmr_val is 32 bits 0=24-bits */
- u8 dock_cap:1; /* 09: Docking supported */
- u8 reset_reg_sup:1; /* 10: System reset via the FADT RESET_REG supported */
- u8 sealed_case:1; /* 11: No internal expansion capabilities and case is sealed */
- u8 headless:1; /* 12: No local video capabilities or local input devices */
- u8 cpu_sw_sleep:1; /* 13: Must execute native instruction after writing SLP_TYPx register */
-
- u8 pci_exp_wak:1; /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
- u8 use_platform_clock:1; /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
- u8 S4rtc_sts_valid:1; /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
- u8 remote_power_on_capable:1; /* 17: System is compatible with remote power on (ACPI 3.0) */
- u8 force_apic_cluster_model:1; /* 18: All local APICs must use cluster model (ACPI 3.0) */
- u8 force_apic_physical_destination_mode:1; /* 19: all local x_aPICs must use physical dest mode (ACPI 3.0) */
- u8:4; /* 20-23: Reserved, must be zero */
- u8 reserved3; /* 24-31: Reserved, must be zero */
-
- struct acpi_generic_address reset_register; /* Reset register address in GAS format */
- u8 reset_value; /* Value to write to the reset_register port to reset the system */
- u8 reserved4[3]; /* These three bytes must be zero */
- u64 xfirmware_ctrl; /* 64-bit physical address of FACS */
- u64 Xdsdt; /* 64-bit physical address of DSDT */
- struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */
- struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */
- struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */
- struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */
- struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */
- struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */
- struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */
- struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */
-};
-
-/* "Down-revved" ACPI 2.0 FADT descriptor */
-
-struct fadt_descriptor_rev2_minus {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- FADT_REV2_COMMON u8 reserved2; /* Reserved, must be zero */
- u32 flags;
- struct acpi_generic_address reset_register; /* Reset register address in GAS format */
- u8 reset_value; /* Value to write to the reset_register port to reset the system. */
- u8 reserved7[3]; /* Reserved, must be zero */
-};
-
-/* ECDT - Embedded Controller Boot Resources Table */
-
-struct ec_boot_resources {
- ACPI_TABLE_HEADER_DEF struct acpi_generic_address ec_control; /* Address of EC command/status register */
- struct acpi_generic_address ec_data; /* Address of EC data register */
- u32 uid; /* Unique ID - must be same as the EC _UID method */
- u8 gpe_bit; /* The GPE for the EC */
- u8 ec_id[1]; /* Full namepath of the EC in the ACPI namespace */
-};
-
-/* SRAT - System Resource Affinity Table */
-
-struct static_resource_alloc {
- u8 type;
- u8 length;
- u8 proximity_domain_lo;
- u8 apic_id;
-
- /* Flags (32 bits) */
-
- u8 enabled:1; /* 00: Use affinity structure */
- u8:7; /* 01-07: Reserved, must be zero */
- u8 reserved3[3]; /* 08-31: Reserved, must be zero */
-
- u8 local_sapic_eid;
- u8 proximity_domain_hi[3];
- u32 reserved4; /* Reserved, must be zero */
-};
-
-struct memory_affinity {
- u8 type;
- u8 length;
- u32 proximity_domain;
- u16 reserved3;
- u64 base_address;
- u64 address_length;
- u32 reserved4;
-
- /* Flags (32 bits) */
-
- u8 enabled:1; /* 00: Use affinity structure */
- u8 hot_pluggable:1; /* 01: Memory region is hot pluggable */
- u8 non_volatile:1; /* 02: Memory is non-volatile */
- u8:5; /* 03-07: Reserved, must be zero */
- u8 reserved5[3]; /* 08-31: Reserved, must be zero */
-
- u64 reserved6; /* Reserved, must be zero */
-};
-
-struct system_resource_affinity {
- ACPI_TABLE_HEADER_DEF u32 reserved1; /* Must be value '1' */
- u64 reserved2; /* Reserved, must be zero */
-};
-
-/* SLIT - System Locality Distance Information Table */
-
-struct system_locality_info {
- ACPI_TABLE_HEADER_DEF u64 locality_count;
- u8 entry[1][1];
-};
-
-#pragma pack()
+/* Code moved to both actbl.h and actbl1.h */
#endif /* __ACTBL2_H__ */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 7ca89cde706e..77cf1236b05a 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -44,6 +44,8 @@
#ifndef __ACTYPES_H__
#define __ACTYPES_H__
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
/*
* ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header
* and must be either 16, 32, or 64
@@ -154,7 +156,6 @@ typedef u64 acpi_physical_address;
#define ACPI_MAX_PTR ACPI_UINT64_MAX
#define ACPI_SIZE_MAX ACPI_UINT64_MAX
-#define ALIGNED_ADDRESS_BOUNDARY 0x00000008
#define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */
/*
@@ -195,8 +196,6 @@ typedef u64 acpi_physical_address;
#define ACPI_MAX_PTR ACPI_UINT32_MAX
#define ACPI_SIZE_MAX ACPI_UINT32_MAX
-#define ALIGNED_ADDRESS_BOUNDARY 0x00000004
-
/*******************************************************************************
*
* Types specific to 16-bit targets
@@ -223,7 +222,6 @@ typedef char *acpi_physical_address;
#define ACPI_MAX_PTR ACPI_UINT16_MAX
#define ACPI_SIZE_MAX ACPI_UINT16_MAX
-#define ALIGNED_ADDRESS_BOUNDARY 0x00000002
#define ACPI_USE_NATIVE_DIVIDE /* No 64-bit integers, ok to use native divide */
/* 64-bit integers cannot be supported */
@@ -254,7 +252,7 @@ typedef acpi_native_uint acpi_size;
/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */
#ifndef acpi_uintptr_t
-#define acpi_uintptr_t void *
+#define acpi_uintptr_t void *
#endif
/*
@@ -263,7 +261,7 @@ typedef acpi_native_uint acpi_size;
* manager implementation is to be used (ACPI_USE_LOCAL_CACHE)
*/
#ifndef acpi_cache_t
-#define acpi_cache_t struct acpi_memory_list
+#define acpi_cache_t struct acpi_memory_list
#endif
/*
@@ -271,7 +269,7 @@ typedef acpi_native_uint acpi_size;
* lock and unlock OSL interfaces.
*/
#ifndef acpi_cpu_flags
-#define acpi_cpu_flags acpi_native_uint
+#define acpi_cpu_flags acpi_native_uint
#endif
/*
@@ -292,6 +290,21 @@ typedef acpi_native_uint acpi_size;
#define ACPI_UNUSED_VAR
#endif
+/*
+ * All ACPICA functions that are available to the rest of the kernel are
+ * tagged with this macro which can be defined as appropriate for the host.
+ */
+#ifndef ACPI_EXPORT_SYMBOL
+#define ACPI_EXPORT_SYMBOL(symbol)
+#endif
+
+/*
+ * thread_id is returned by acpi_os_get_thread_id.
+ */
+#ifndef acpi_thread_id
+#define acpi_thread_id acpi_native_uint
+#endif
+
/*******************************************************************************
*
* Independent types
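Note on the ACPI_EXPORT_SYMBOL and acpi_thread_id fallbacks added in the hunk above: both are deliberately weak defaults that a host-specific header is expected to override. A minimal sketch of such an override, loosely modelled on the Linux definitions that appear later in this patch in aclinux.h (the non-kernel branch here is an illustrative assumption, not part of the patch):

/* Hypothetical host header -- illustrative only */
#ifdef __KERNEL__
#define ACPI_EXPORT_SYMBOL(symbol)	EXPORT_SYMBOL(symbol);	/* expose the ACPICA entry point to modules */
#define acpi_thread_id			u32			/* whatever uniquely identifies a thread on this host */
#else
#define ACPI_EXPORT_SYMBOL(symbol)				/* no module system: expands to nothing */
#define acpi_thread_id			acpi_native_uint	/* fall back to the generic default */
#endif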
@@ -477,15 +490,15 @@ typedef u64 acpi_integer;
*/
typedef u32 acpi_table_type;
-#define ACPI_TABLE_RSDP (acpi_table_type) 0
-#define ACPI_TABLE_DSDT (acpi_table_type) 1
-#define ACPI_TABLE_FADT (acpi_table_type) 2
-#define ACPI_TABLE_FACS (acpi_table_type) 3
-#define ACPI_TABLE_PSDT (acpi_table_type) 4
-#define ACPI_TABLE_SSDT (acpi_table_type) 5
-#define ACPI_TABLE_XSDT (acpi_table_type) 6
-#define ACPI_TABLE_MAX 6
-#define NUM_ACPI_TABLE_TYPES (ACPI_TABLE_MAX+1)
+#define ACPI_TABLE_ID_RSDP (acpi_table_type) 0
+#define ACPI_TABLE_ID_DSDT (acpi_table_type) 1
+#define ACPI_TABLE_ID_FADT (acpi_table_type) 2
+#define ACPI_TABLE_ID_FACS (acpi_table_type) 3
+#define ACPI_TABLE_ID_PSDT (acpi_table_type) 4
+#define ACPI_TABLE_ID_SSDT (acpi_table_type) 5
+#define ACPI_TABLE_ID_XSDT (acpi_table_type) 6
+#define ACPI_TABLE_ID_MAX 6
+#define ACPI_NUM_TABLE_TYPES (ACPI_TABLE_ID_MAX+1)
/*
* Types associated with ACPI names and objects. The first group of
@@ -816,7 +829,7 @@ struct acpi_system_info {
u32 debug_level;
u32 debug_layer;
u32 num_table_types;
- struct acpi_table_info table_info[NUM_ACPI_TABLE_TYPES];
+ struct acpi_table_info table_info[ACPI_TABLE_ID_MAX + 1];
};
/*
@@ -858,7 +871,7 @@ acpi_status(*acpi_adr_space_handler) (u32 function,
void *handler_context,
void *region_context);
-#define ACPI_DEFAULT_HANDLER NULL
+#define ACPI_DEFAULT_HANDLER NULL
typedef
acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
@@ -911,12 +924,13 @@ struct acpi_compatible_id_list {
#define ACPI_STA_DEVICE_PRESENT 0x01
#define ACPI_STA_DEVICE_ENABLED 0x02
#define ACPI_STA_DEVICE_UI 0x04
-#define ACPI_STA_DEVICE_OK 0x08
+#define ACPI_STA_DEVICE_FUNCTIONING 0x08
+#define ACPI_STA_DEVICE_OK 0x08 /* Synonym */
#define ACPI_STA_BATTERY_PRESENT 0x10
#define ACPI_COMMON_OBJ_INFO \
- acpi_object_type type; /* ACPI object type */ \
- acpi_name name /* ACPI object Name */
+ acpi_object_type type; /* ACPI object type */ \
+ acpi_name name /* ACPI object Name */
struct acpi_obj_info_header {
ACPI_COMMON_OBJ_INFO;
@@ -957,7 +971,7 @@ struct acpi_mem_space_context {
* Definitions for Resource Attributes
*/
typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */
-typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (length+3) = (64_k-1)+3 */
+typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
/*
* Memory Attributes
@@ -972,8 +986,8 @@ typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (length+3) = (6
/*
* IO Attributes
- * The ISA Io ranges are: n000-n0_ffh, n400-n4_ffh, n800-n8_ffh, n_c00-n_cFFh.
- * The non-ISA Io ranges are: n100-n3_ffh, n500-n7_ffh, n900-n_bFfh, n_cd0-n_fFFh.
+ * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
+ * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
*/
#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01
#define ACPI_ISA_ONLY_RANGES (u8) 0x02
@@ -1171,12 +1185,12 @@ struct acpi_resource_source {
/* Fields common to all address descriptors, 16/32/64 bit */
#define ACPI_RESOURCE_ADDRESS_COMMON \
- u8 resource_type; \
- u8 producer_consumer; \
- u8 decode; \
- u8 min_address_fixed; \
- u8 max_address_fixed; \
- union acpi_resource_attribute info;
+ u8 resource_type; \
+ u8 producer_consumer; \
+ u8 decode; \
+ u8 min_address_fixed; \
+ u8 max_address_fixed; \
+ union acpi_resource_attribute info;
struct acpi_resource_address {
ACPI_RESOURCE_ADDRESS_COMMON};
@@ -1297,16 +1311,6 @@ struct acpi_resource {
#define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length)
-#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
-#define ACPI_ALIGN_RESOURCE_SIZE(length) (length)
-#else
-#define ACPI_ALIGN_RESOURCE_SIZE(length) ACPI_ROUND_UP_TO_NATIVE_WORD(length)
-#endif
-
-/*
- * END: of definitions for Resource Attributes
- */
-
struct acpi_pci_routing_table {
u32 length;
u32 pin;
@@ -1315,8 +1319,4 @@ struct acpi_pci_routing_table {
char source[4]; /* pad to 64 bits so sizeof() works in all cases */
};
-/*
- * END: of definitions for PCI Routing tables
- */
-
#endif /* __ACTYPES_H__ */
diff --git a/include/acpi/acutils.h b/include/acpi/acutils.h
index 0927765df6aa..ba039ea1a057 100644
--- a/include/acpi/acutils.h
+++ b/include/acpi/acutils.h
@@ -50,24 +50,24 @@ extern const u8 acpi_gbl_resource_aml_sizes[];
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
-extern const char *acpi_gbl_BMdecode[2];
-extern const char *acpi_gbl_config_decode[4];
-extern const char *acpi_gbl_consume_decode[2];
-extern const char *acpi_gbl_DECdecode[2];
-extern const char *acpi_gbl_HEdecode[2];
-extern const char *acpi_gbl_io_decode[2];
-extern const char *acpi_gbl_LLdecode[2];
-extern const char *acpi_gbl_max_decode[2];
-extern const char *acpi_gbl_MEMdecode[4];
-extern const char *acpi_gbl_min_decode[2];
-extern const char *acpi_gbl_MTPdecode[4];
-extern const char *acpi_gbl_RNGdecode[4];
-extern const char *acpi_gbl_RWdecode[2];
-extern const char *acpi_gbl_SHRdecode[2];
-extern const char *acpi_gbl_SIZdecode[4];
-extern const char *acpi_gbl_TRSdecode[2];
-extern const char *acpi_gbl_TTPdecode[2];
-extern const char *acpi_gbl_TYPdecode[4];
+extern const char *acpi_gbl_bm_decode[];
+extern const char *acpi_gbl_config_decode[];
+extern const char *acpi_gbl_consume_decode[];
+extern const char *acpi_gbl_dec_decode[];
+extern const char *acpi_gbl_he_decode[];
+extern const char *acpi_gbl_io_decode[];
+extern const char *acpi_gbl_ll_decode[];
+extern const char *acpi_gbl_max_decode[];
+extern const char *acpi_gbl_mem_decode[];
+extern const char *acpi_gbl_min_decode[];
+extern const char *acpi_gbl_mtp_decode[];
+extern const char *acpi_gbl_rng_decode[];
+extern const char *acpi_gbl_rw_decode[];
+extern const char *acpi_gbl_shr_decode[];
+extern const char *acpi_gbl_siz_decode[];
+extern const char *acpi_gbl_trs_decode[];
+extern const char *acpi_gbl_ttp_decode[];
+extern const char *acpi_gbl_typ_decode[];
#endif
/* Types for Resource descriptor entries */
@@ -78,6 +78,12 @@ extern const char *acpi_gbl_TYPdecode[4];
#define ACPI_SMALL_VARIABLE_LENGTH 3
typedef
+acpi_status(*acpi_walk_aml_callback) (u8 * aml,
+ u32 length,
+ u32 offset,
+ u8 resource_index, void **context);
+
+typedef
acpi_status(*acpi_pkg_callback) (u8 object_type,
union acpi_operand_object * source_object,
union acpi_generic_state * state,
@@ -277,6 +283,8 @@ acpi_ut_ptr_exit(u32 line_number,
void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id);
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display);
+
void acpi_ut_report_error(char *module_name, u32 line_number);
void acpi_ut_report_info(char *module_name, u32 line_number);
@@ -445,6 +453,8 @@ acpi_ut_short_divide(acpi_integer in_dividend,
/*
* utmisc
*/
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
+
acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
@@ -460,7 +470,9 @@ void acpi_ut_print_string(char *string, u8 max_length);
u8 acpi_ut_valid_acpi_name(u32 name);
-u8 acpi_ut_valid_acpi_character(char character);
+acpi_name acpi_ut_repair_name(acpi_name name);
+
+u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position);
acpi_status
acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
@@ -469,6 +481,25 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
#define ACPI_ANY_BASE 0
+u32 acpi_ut_dword_byte_swap(u32 value);
+
+void acpi_ut_set_integer_width(u8 revision);
+
+#ifdef ACPI_DEBUG_OUTPUT
+void
+acpi_ut_display_init_pathname(u8 type,
+ struct acpi_namespace_node *obj_handle,
+ char *path);
+#endif
+
+/*
+ * utresrc
+ */
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+ acpi_size aml_length,
+ acpi_walk_aml_callback user_function, void **context);
+
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
u32 acpi_ut_get_descriptor_length(void *aml);
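The new acpi_ut_walk_aml_resources() prototype pairs with the acpi_walk_aml_callback typedef added earlier in this file: the walker validates each AML resource descriptor and hands its buffer position, length, offset and resource index to the callback. A hedged, ACPICA-internal sketch of a caller (the counting callback and helper names are invented for illustration and assume the usual acutils.h environment):

/* Illustrative only: count the descriptors in a raw AML resource buffer */
static acpi_status
count_resource(u8 *aml, u32 length, u32 offset, u8 resource_index, void **context)
{
	u32 *count = (u32 *) *context;

	(*count)++;		/* one callback per validated descriptor */
	return (AE_OK);		/* AE_OK lets the walk continue */
}

static u32 count_aml_resources(u8 *aml, acpi_size aml_length)
{
	u32 count = 0;
	void *context = &count;

	(void)acpi_ut_walk_aml_resources(aml, aml_length,
					 count_resource, &context);
	return (count);
}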
@@ -483,20 +514,6 @@ acpi_status
acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc,
u8 ** end_tag);
-u8 acpi_ut_generate_checksum(u8 * buffer, u32 length);
-
-u32 acpi_ut_dword_byte_swap(u32 value);
-
-void acpi_ut_set_integer_width(u8 revision);
-
-#ifdef ACPI_DEBUG_OUTPUT
-void
-acpi_ut_display_init_pathname(u8 type,
- struct acpi_namespace_node *obj_handle,
- char *path);
-
-#endif
-
/*
* utmutex - mutex support
*/
@@ -523,14 +540,15 @@ acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line);
-void *acpi_ut_callocate(acpi_size size, u32 component, char *module, u32 line);
+void *acpi_ut_allocate_zeroed(acpi_size size,
+ u32 component, char *module, u32 line);
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
void *acpi_ut_allocate_and_track(acpi_size size,
u32 component, char *module, u32 line);
-void *acpi_ut_callocate_and_track(acpi_size size,
- u32 component, char *module, u32 line);
+void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
+ u32 component, char *module, u32 line);
void
acpi_ut_free_and_track(void *address, u32 component, char *module, u32 line);
@@ -540,6 +558,11 @@ void acpi_ut_dump_allocation_info(void);
#endif /* ACPI_FUTURE_USAGE */
void acpi_ut_dump_allocations(u32 component, char *module);
+
+acpi_status
+acpi_ut_create_list(char *list_name,
+ u16 object_size, struct acpi_memory_list **return_cache);
+
#endif
#endif /* _ACUTILS_H */
diff --git a/include/acpi/amlcode.h b/include/acpi/amlcode.h
index 37964a59aef8..cf18426a87b1 100644
--- a/include/acpi/amlcode.h
+++ b/include/acpi/amlcode.h
@@ -180,8 +180,10 @@
#define AML_BANK_FIELD_OP (u16) 0x5b87
#define AML_DATA_REGION_OP (u16) 0x5b88 /* ACPI 2.0 */
-/* Bogus opcodes (they are actually two separate opcodes) */
-
+/*
+ * Combination opcodes (actually two one-byte opcodes)
+ * Used by the disassembler and i_aSL compiler
+ */
#define AML_LGREATEREQUAL_OP (u16) 0x9295
#define AML_LLESSEQUAL_OP (u16) 0x9294
#define AML_LNOTEQUAL_OP (u16) 0x9293
diff --git a/include/acpi/amlresrc.h b/include/acpi/amlresrc.h
index fb4735315ad3..be03818af9d1 100644
--- a/include/acpi/amlresrc.h
+++ b/include/acpi/amlresrc.h
@@ -42,39 +42,45 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
#ifndef __AMLRESRC_H
#define __AMLRESRC_H
-#define ASL_RESNAME_ADDRESS "_ADR"
-#define ASL_RESNAME_ALIGNMENT "_ALN"
-#define ASL_RESNAME_ADDRESSSPACE "_ASI"
-#define ASL_RESNAME_ACCESSSIZE "_ASZ"
-#define ASL_RESNAME_TYPESPECIFICATTRIBUTES "_ATT"
-#define ASL_RESNAME_BASEADDRESS "_BAS"
-#define ASL_RESNAME_BUSMASTER "_BM_" /* Master(1), Slave(0) */
-#define ASL_RESNAME_DECODE "_DEC"
-#define ASL_RESNAME_DMA "_DMA"
-#define ASL_RESNAME_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
-#define ASL_RESNAME_GRANULARITY "_GRA"
-#define ASL_RESNAME_INTERRUPT "_INT"
-#define ASL_RESNAME_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
-#define ASL_RESNAME_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
-#define ASL_RESNAME_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
-#define ASL_RESNAME_LENGTH "_LEN"
-#define ASL_RESNAME_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
-#define ASL_RESNAME_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
-#define ASL_RESNAME_MAXADDR "_MAX"
-#define ASL_RESNAME_MINADDR "_MIN"
-#define ASL_RESNAME_MAXTYPE "_MAF"
-#define ASL_RESNAME_MINTYPE "_MIF"
-#define ASL_RESNAME_REGISTERBITOFFSET "_RBO"
-#define ASL_RESNAME_REGISTERBITWIDTH "_RBW"
-#define ASL_RESNAME_RANGETYPE "_RNG"
-#define ASL_RESNAME_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
-#define ASL_RESNAME_TRANSLATION "_TRA"
-#define ASL_RESNAME_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
-#define ASL_RESNAME_TYPE "_TTP" /* Translation(1), Static (0) */
-#define ASL_RESNAME_XFERTYPE "_SIz" /* 8(0), 8_and16(1), 16(2) */
+/*
+ * Resource descriptor tags, as defined in the ACPI specification.
+ * Used to symbolically reference fields within a descriptor.
+ */
+#define ACPI_RESTAG_ADDRESS "_ADR"
+#define ACPI_RESTAG_ALIGNMENT "_ALN"
+#define ACPI_RESTAG_ADDRESSSPACE "_ASI"
+#define ACPI_RESTAG_ACCESSSIZE "_ASZ"
+#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
+#define ACPI_RESTAG_BASEADDRESS "_BAS"
+#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
+#define ACPI_RESTAG_DECODE "_DEC"
+#define ACPI_RESTAG_DMA "_DMA"
+#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_GRANULARITY "_GRA"
+#define ACPI_RESTAG_INTERRUPT "_INT"
+#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
+#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
+#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
+#define ACPI_RESTAG_LENGTH "_LEN"
+#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
+#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
+#define ACPI_RESTAG_MAXADDR "_MAX"
+#define ACPI_RESTAG_MINADDR "_MIN"
+#define ACPI_RESTAG_MAXTYPE "_MAF"
+#define ACPI_RESTAG_MINTYPE "_MIF"
+#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
+#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
+#define ACPI_RESTAG_RANGETYPE "_RNG"
+#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_TRANSLATION "_TRA"
+#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
+#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
+#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
/* Default sizes for "small" resource descriptors */
@@ -109,7 +115,7 @@ struct asl_resource_node {
* SMALL descriptors
*/
#define AML_RESOURCE_SMALL_HEADER_COMMON \
- u8 descriptor_type;
+ u8 descriptor_type;
struct aml_resource_small_header {
AML_RESOURCE_SMALL_HEADER_COMMON};
@@ -162,8 +168,8 @@ struct aml_resource_end_tag {
* LARGE descriptors
*/
#define AML_RESOURCE_LARGE_HEADER_COMMON \
- u8 descriptor_type;\
- u16 resource_length;
+ u8 descriptor_type;\
+ u16 resource_length;
struct aml_resource_large_header {
AML_RESOURCE_LARGE_HEADER_COMMON};
@@ -194,9 +200,9 @@ struct aml_resource_fixed_memory32 {
};
#define AML_RESOURCE_ADDRESS_COMMON \
- u8 resource_type; \
- u8 flags; \
- u8 specific_flags;
+ u8 resource_type; \
+ u8 flags; \
+ u8 specific_flags;
struct aml_resource_address {
AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_ADDRESS_COMMON};
@@ -266,6 +272,7 @@ struct aml_resource_generic_register {
union aml_resource {
/* Descriptor headers */
+ u8 descriptor_type;
struct aml_resource_small_header small_header;
struct aml_resource_large_header large_header;
@@ -296,9 +303,9 @@ union aml_resource {
/* Utility overlays */
struct aml_resource_address address;
- u32 u32_item;
- u16 u16_item;
- u8 U8item;
+ u32 dword_item;
+ u16 word_item;
+ u8 byte_item;
};
#endif
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
index 3fa81d55cd0c..c5472be6f3a2 100644
--- a/include/acpi/pdc_intel.h
+++ b/include/acpi/pdc_intel.h
@@ -18,6 +18,11 @@
ACPI_PDC_C_C1_HALT | \
ACPI_PDC_P_FFH)
+#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
+ ACPI_PDC_C_C1_HALT | \
+ ACPI_PDC_SMP_P_SWCOORD | \
+ ACPI_PDC_P_FFH)
+
#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
ACPI_PDC_SMP_C1PT | \
ACPI_PDC_C_C1_HALT)
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 223ec6467108..453a469fd397 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -49,33 +49,41 @@
*/
#ifdef ACPI_LIBRARY
+/*
+ * Note: The non-debug version of the acpi_library does not contain any
+ * debug support, for minimal size. The debug version uses ACPI_FULL_DEBUG.
+ */
#define ACPI_USE_LOCAL_CACHE
#endif
-#ifdef ACPI_DUMP_APP
-#ifndef MSDOS
+#ifdef ACPI_ASL_COMPILER
#define ACPI_DEBUG_OUTPUT
-#endif
#define ACPI_APPLICATION
#define ACPI_DISASSEMBLER
-#define ACPI_NO_METHOD_EXECUTION
+#define ACPI_CONSTANT_EVAL_ONLY
+#define ACPI_LARGE_NAMESPACE_NODE
+#define ACPI_DATA_TABLE_DISASSEMBLY
#endif
#ifdef ACPI_EXEC_APP
#undef DEBUGGER_THREADING
#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED
-#define ACPI_DEBUG_OUTPUT
+#define ACPI_FULL_DEBUG
#define ACPI_APPLICATION
#define ACPI_DEBUGGER
-#define ACPI_DISASSEMBLER
#define ACPI_MUTEX_DEBUG
+#define ACPI_DBG_TRACK_ALLOCATIONS
#endif
-#ifdef ACPI_ASL_COMPILER
+#ifdef ACPI_DASM_APP
+#ifndef MSDOS
#define ACPI_DEBUG_OUTPUT
+#endif
#define ACPI_APPLICATION
#define ACPI_DISASSEMBLER
-#define ACPI_CONSTANT_EVAL_ONLY
+#define ACPI_NO_METHOD_EXECUTION
+#define ACPI_LARGE_NAMESPACE_NODE
+#define ACPI_DATA_TABLE_DISASSEMBLY
#endif
#ifdef ACPI_APPLICATION
@@ -83,6 +91,12 @@
#define ACPI_USE_LOCAL_CACHE
#endif
+#ifdef ACPI_FULL_DEBUG
+#define ACPI_DEBUGGER
+#define ACPI_DEBUG_OUTPUT
+#define ACPI_DISASSEMBLER
+#endif
+
/*
* Environment configuration. The purpose of this file is to interface to the
* local generation environment.
@@ -137,7 +151,7 @@
#elif defined(MSDOS) /* Must appear after WIN32 and WIN64 check */
#include "acdos16.h"
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include "acfreebsd.h"
#elif defined(__NetBSD__)
@@ -163,17 +177,6 @@
#endif
-/*
- * Memory allocation tracking. Used only if
- * 1) This is the debug version
- * 2) This is NOT a 16-bit version of the code (not enough real-mode memory)
- */
-#ifdef ACPI_DEBUG_OUTPUT
-#if ACPI_MACHINE_WIDTH != 16
-#define ACPI_DBG_TRACK_ALLOCATIONS
-#endif
-#endif
-
/*! [End] no source code translation !*/
/*
@@ -271,8 +274,8 @@ typedef char *va_list;
/*
* Storage alignment properties
*/
-#define _AUPBND (sizeof (acpi_native_uint) - 1)
-#define _ADNBND (sizeof (acpi_native_uint) - 1)
+#define _AUPBND (sizeof (acpi_native_int) - 1)
+#define _ADNBND (sizeof (acpi_native_int) - 1)
/*
* Variable argument list macro definitions
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 3c6a6205853a..277d35bced03 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -51,27 +51,22 @@
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/div64.h>
#include <asm/acpi.h>
+#include <linux/slab.h>
-#define strtoul simple_strtoul
-
-#define ACPI_MACHINE_WIDTH BITS_PER_LONG
+/* Host-dependent types and defines */
-/* Type(s) for the OSL */
-
-#ifdef ACPI_USE_LOCAL_CACHE
-#define acpi_cache_t struct acpi_memory_list
-#else
-#include <linux/slab.h>
-#define acpi_cache_t kmem_cache_t
-#endif
+#define ACPI_MACHINE_WIDTH BITS_PER_LONG
+#define acpi_cache_t kmem_cache_t
+#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
+#define strtoul simple_strtoul
/* Full namespace pathname length limit - arbitrary */
-
#define ACPI_PATHNAME_MAX 256
#else /* !__KERNEL__ */
@@ -103,4 +98,8 @@
#define acpi_cpu_flags unsigned long
+#define acpi_thread_id u32
+
+static inline acpi_thread_id acpi_os_get_thread_id(void) { return 0; }
+
#endif /* __ACLINUX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index ef7d83a41470..77371b3cdc44 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -2,6 +2,7 @@
#define __ACPI_PROCESSOR_H
#include <linux/kernel.h>
+#include <linux/cpu.h>
#include <asm/acpi.h>
@@ -17,6 +18,17 @@
#define ACPI_PDC_REVISION_ID 0x1
+#define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
+#define ACPI_PSD_REV0_ENTRIES 5
+
+/*
+ * Types of coordination defined in ACPI 3.0. Same macros can be used across
+ * P, C and T states
+ */
+#define DOMAIN_COORD_TYPE_SW_ALL 0xfc
+#define DOMAIN_COORD_TYPE_SW_ANY 0xfd
+#define DOMAIN_COORD_TYPE_HW_ALL 0xfe
+
/* Power Management */
struct acpi_processor_cx;
@@ -65,6 +77,14 @@ struct acpi_processor_power {
/* Performance Management */
+struct acpi_psd_package {
+ acpi_integer num_entries;
+ acpi_integer revision;
+ acpi_integer domain;
+ acpi_integer coord_type;
+ acpi_integer num_processors;
+} __attribute__ ((packed));
+
struct acpi_pct_register {
u8 descriptor;
u16 length;
@@ -91,7 +111,9 @@ struct acpi_processor_performance {
struct acpi_pct_register status_register;
unsigned int state_count;
struct acpi_processor_px *states;
-
+ struct acpi_psd_package domain_info;
+ cpumask_t shared_cpu_map;
+ unsigned int shared_type;
};
/* Throttling Control */
@@ -160,6 +182,9 @@ struct acpi_processor_errata {
} piix4;
};
+extern int acpi_processor_preregister_performance(
+ struct acpi_processor_performance **performance);
+
extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
extern void acpi_processor_unregister_performance(struct
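struct acpi_psd_package mirrors the five-element _PSD package from ACPI 3.0, and the new domain_info/shared_cpu_map/shared_type fields record which CPUs share a P-state domain and how they must be coordinated. A hedged sketch of the kind of validation a consumer of a decoded _PSD might apply (the helper is illustrative and not part of this patch; it assumes <acpi/processor.h> is included):

/* Illustrative sanity check on a decoded _PSD package */
static int example_psd_valid(const struct acpi_psd_package *psd)
{
	if (psd->num_entries != ACPI_PSD_REV0_ENTRIES)
		return 0;
	if (psd->revision != ACPI_PSD_REV0_REVISION)
		return 0;

	switch (psd->coord_type) {
	case DOMAIN_COORD_TYPE_SW_ALL:	/* software coordinates every CPU in the domain */
	case DOMAIN_COORD_TYPE_SW_ANY:	/* any CPU in the domain may initiate a transition */
	case DOMAIN_COORD_TYPE_HW_ALL:	/* hardware coordinates the domain */
		return 1;
	default:
		return 0;
	}
}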
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
index f6de033718a0..917b9fe372cf 100644
--- a/include/asm-alpha/irq.h
+++ b/include/asm-alpha/irq.h
@@ -92,8 +92,4 @@ extern void enable_irq(unsigned int);
struct pt_regs;
extern void (*perf_irq)(unsigned long, struct pt_regs *);
-struct irqaction;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
-
#endif /* _ALPHA_IRQ_H */
diff --git a/include/asm-arm/arch-imx/imx-dma.h b/include/asm-arm/arch-imx/imx-dma.h
index f2063c1d610d..599f03e5a9ef 100644
--- a/include/asm-arm/arch-imx/imx-dma.h
+++ b/include/asm-arm/arch-imx/imx-dma.h
@@ -46,7 +46,7 @@
struct imx_dma_channel {
const char *name;
void (*irq_handler) (int, void *, struct pt_regs *);
- void (*err_handler) (int, void *, struct pt_regs *);
+ void (*err_handler) (int, void *, struct pt_regs *, int errcode);
void *data;
dmamode_t dma_mode;
struct scatterlist *sg;
@@ -58,6 +58,10 @@ struct imx_dma_channel {
extern struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
+#define IMX_DMA_ERR_BURST 1
+#define IMX_DMA_ERR_REQUEST 2
+#define IMX_DMA_ERR_TRANSFER 4
+#define IMX_DMA_ERR_BUFFER 8
/* The type to distinguish channel numbers parameter from ordinal int type */
typedef int imx_dmach_t;
@@ -74,7 +78,7 @@ imx_dma_setup_sg(imx_dmach_t dma_ch,
int
imx_dma_setup_handlers(imx_dmach_t dma_ch,
void (*irq_handler) (int, void *, struct pt_regs *),
- void (*err_handler) (int, void *, struct pt_regs *), void *data);
+ void (*err_handler) (int, void *, struct pt_regs *, int), void *data);
void imx_dma_enable(imx_dmach_t dma_ch);
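The extra err_handler argument carries one of the IMX_DMA_ERR_* bit codes defined above, so a driver can distinguish a burst error from a bad request, transfer or buffer descriptor. A hedged sketch of a handler matching the new prototype (the function name and logging are assumptions; <linux/kernel.h> and the imx-dma header are assumed to be included):

/* Illustrative error handler for the extended imx_dma_setup_handlers() prototype */
static void example_dma_err(int channel, void *data, struct pt_regs *regs, int errcode)
{
	if (errcode & IMX_DMA_ERR_BURST)
		printk(KERN_ERR "dma%d: burst error\n", channel);
	if (errcode & IMX_DMA_ERR_REQUEST)
		printk(KERN_ERR "dma%d: request error\n", channel);
	if (errcode & IMX_DMA_ERR_TRANSFER)
		printk(KERN_ERR "dma%d: transfer error\n", channel);
	if (errcode & IMX_DMA_ERR_BUFFER)
		printk(KERN_ERR "dma%d: buffer error\n", channel);
}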
diff --git a/include/asm-arm/arch-ixp23xx/ixp23xx.h b/include/asm-arm/arch-ixp23xx/ixp23xx.h
index 01efdbd1180f..d0a72201ee96 100644
--- a/include/asm-arm/arch-ixp23xx/ixp23xx.h
+++ b/include/asm-arm/arch-ixp23xx/ixp23xx.h
@@ -124,6 +124,7 @@
#define IXP23XX_EXP_UNIT_FUSE IXP23XX_EXP_CFG_REG(0x28)
#define IXP23XX_EXP_MSF_MUX IXP23XX_EXP_CFG_REG(0x30)
+#define IXP23XX_EXP_CFG_FUSE IXP23XX_EXP_CFG_REG(0x34)
#define IXP23XX_EXP_BUS_PHYS 0x90000000
#define IXP23XX_EXP_BUS_WINDOW_SIZE 0x01000000
@@ -265,6 +266,8 @@
#define IXP23XX_PCI_UNIT_RESET (1 << 1)
#define IXP23XX_XSCALE_RESET (1 << 0)
+#define IXP23XX_UENGINE_CSR_VIRT_BASE (IXP23XX_CAP_CSR_VIRT + 0x18000)
+
/****************************************************************************
* PCI CSRs.
diff --git a/include/asm-arm/arch-ixp23xx/platform.h b/include/asm-arm/arch-ixp23xx/platform.h
index e4d99060a049..19a73b39c864 100644
--- a/include/asm-arm/arch-ixp23xx/platform.h
+++ b/include/asm-arm/arch-ixp23xx/platform.h
@@ -14,6 +14,21 @@
#ifndef __ASSEMBLY__
+extern inline unsigned long ixp2000_reg_read(volatile void *reg)
+{
+ return *((volatile unsigned long *)reg);
+}
+
+extern inline void ixp2000_reg_write(volatile void *reg, unsigned long val)
+{
+ *((volatile unsigned long *)reg) = val;
+}
+
+extern inline void ixp2000_reg_wrb(volatile void *reg, unsigned long val)
+{
+ *((volatile unsigned long *)reg) = val;
+}
+
struct pci_sys_data;
void ixp23xx_map_io(void);
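The ixp2000_reg_* helpers added above are plain volatile loads and stores; ixp2000_reg_wrb() is the write-with-barrier variant, which on ixp23xx is identical to an ordinary write. A brief usage sketch against the expansion-bus fuse register defined earlier in this patch (the register choice and bit are purely illustrative):

/* Illustrative only: read-modify-write an expansion-bus configuration register */
static void example_poke_exp_cfg(void)
{
	unsigned long val = ixp2000_reg_read(IXP23XX_EXP_CFG_FUSE);

	ixp2000_reg_wrb(IXP23XX_EXP_CFG_FUSE, val | 0x1);	/* wrb: same as a plain write on ixp23xx */
}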
diff --git a/include/asm-arm/arch-netx/eth.h b/include/asm-arm/arch-netx/eth.h
new file mode 100644
index 000000000000..643c90ef8b72
--- /dev/null
+++ b/include/asm-arm/arch-netx/eth.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-arm/arch-netx/eth.h
+ *
+ * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef ASMARM_ARCH_ETH_H
+#define ASMARM_ARCH_ETH_H
+
+struct netxeth_platform_data {
+ unsigned int xcno; /* number of xmac/xpec engine this eth uses */
+};
+
+#endif
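netxeth_platform_data only carries the xmac/xpec unit number; a board file is expected to hand it to the Ethernet driver through platform_data. A hedged sketch of how a board might do that (the "netx-eth" device name and unit number are assumptions, not taken from this patch; <linux/platform_device.h> and <asm/arch/eth.h> are assumed):

/* Illustrative board code, assuming the driver binds to "netx-eth" */
static struct netxeth_platform_data netx_eth0_pdata = {
	.xcno	= 0,			/* use xmac/xpec engine 0 */
};

static struct platform_device netx_eth0_device = {
	.name	= "netx-eth",
	.id	= 0,
	.dev	= {
		.platform_data = &netx_eth0_pdata,
	},
};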
diff --git a/include/asm-arm/arch-pnx4008/debug-macro.S b/include/asm-arm/arch-pnx4008/debug-macro.S
index eb3839de389a..67d18a203d23 100644
--- a/include/asm-arm/arch-pnx4008/debug-macro.S
+++ b/include/asm-arm/arch-pnx4008/debug-macro.S
@@ -19,9 +19,5 @@
addne \rx, \rx, #0xf4000000
.endm
- .macro senduart,rd,rx
- strb \rd, [\rx, #0x0]
- .endm
-
#define UART_SHIFT 2
#include <asm/hardware/debug-8250.S>
diff --git a/include/asm-arm/arch-pnx4008/gpio.h b/include/asm-arm/arch-pnx4008/gpio.h
index 1fa5a77c3010..d01bf83d55c2 100644
--- a/include/asm-arm/arch-pnx4008/gpio.h
+++ b/include/asm-arm/arch-pnx4008/gpio.h
@@ -127,6 +127,79 @@
#define GPIO_ISOUT(K) ((GPIO_TYPE_MASK(K) == GPIO_OUT) && (GPIO_BIT(K) & GPIO_OUT_MASK))
#define GPIO_ISIN(K) ((GPIO_TYPE_MASK(K) == GPIO_IN) && (GPIO_BIT(K) & GPIO_IN_MASK))
+/* Start Enable Pin Interrupts - table 58 page 66 */
+
+#define SE_PIN_BASE_INT 32
+
+#define SE_U7_RX_INT 63
+#define SE_U7_HCTS_INT 62
+#define SE_BT_CLKREQ_INT 61
+#define SE_U6_IRRX_INT 60
+/*59 unused*/
+#define SE_U5_RX_INT 58
+#define SE_GPI_11_INT 57
+#define SE_U3_RX_INT 56
+#define SE_U2_HCTS_INT 55
+#define SE_U2_RX_INT 54
+#define SE_U1_RX_INT 53
+#define SE_DISP_SYNC_INT 52
+/*51 unused*/
+#define SE_SDIO_INT_N 50
+#define SE_MSDIO_START_INT 49
+#define SE_GPI_06_INT 48
+#define SE_GPI_05_INT 47
+#define SE_GPI_04_INT 46
+#define SE_GPI_03_INT 45
+#define SE_GPI_02_INT 44
+#define SE_GPI_01_INT 43
+#define SE_GPI_00_INT 42
+#define SE_SYSCLKEN_PIN_INT 41
+#define SE_SPI1_DATAIN_INT 40
+#define SE_GPI_07_INT 39
+#define SE_SPI2_DATAIN_INT 38
+#define SE_GPI_10_INT 37
+#define SE_GPI_09_INT 36
+#define SE_GPI_08_INT 35
+/*34-32 unused*/
+
+/* Start Enable Internal Interrupts - table 57 page 65 */
+
+#define SE_INT_BASE_INT 0
+
+#define SE_TS_IRQ 31
+#define SE_TS_P_INT 30
+#define SE_TS_AUX_INT 29
+/*27-28 unused*/
+#define SE_USB_AHB_NEED_CLK_INT 26
+#define SE_MSTIMER_INT 25
+#define SE_RTC_INT 24
+#define SE_USB_NEED_CLK_INT 23
+#define SE_USB_INT 22
+#define SE_USB_I2C_INT 21
+#define SE_USB_OTG_TIMER_INT 20
+#define SE_USB_OTG_ATX_INT_N 19
+/*18 unused*/
+#define SE_DSP_GPIO4_INT 17
+#define SE_KEY_IRQ 16
+#define SE_DSP_SLAVEPORT_INT 15
+#define SE_DSP_GPIO1_INT 14
+#define SE_DSP_GPIO0_INT 13
+#define SE_DSP_AHB_INT 12
+/*11-6 unused*/
+#define SE_GPIO_05_INT 5
+#define SE_GPIO_04_INT 4
+#define SE_GPIO_03_INT 3
+#define SE_GPIO_02_INT 2
+#define SE_GPIO_01_INT 1
+#define SE_GPIO_00_INT 0
+
+#define START_INT_REG_BIT(irq) (1<<((irq)&0x1F))
+
+#define START_INT_ER_REG(irq) IO_ADDRESS((PNX4008_PWRMAN_BASE + 0x20 + (((irq)&(0x1<<5))>>1)))
+#define START_INT_RSR_REG(irq) IO_ADDRESS((PNX4008_PWRMAN_BASE + 0x24 + (((irq)&(0x1<<5))>>1)))
+#define START_INT_SR_REG(irq) IO_ADDRESS((PNX4008_PWRMAN_BASE + 0x28 + (((irq)&(0x1<<5))>>1)))
+#define START_INT_APR_REG(irq) IO_ADDRESS((PNX4008_PWRMAN_BASE + 0x2C + (((irq)&(0x1<<5))>>1)))
+
extern int pnx4008_gpio_register_pin(unsigned short pin);
extern int pnx4008_gpio_unregister_pin(unsigned short pin);
extern unsigned long pnx4008_gpio_read_pin(unsigned short pin);
@@ -136,4 +209,33 @@ extern int pnx4008_gpio_read_pin_direction(unsigned short pin);
extern int pnx4008_gpio_set_pin_mux(unsigned short pin, int output);
extern int pnx4008_gpio_read_pin_mux(unsigned short pin);
+static inline void start_int_umask(u8 irq)
+{
+ __raw_writel(__raw_readl(START_INT_ER_REG(irq)) |
+ START_INT_REG_BIT(irq), START_INT_ER_REG(irq));
+}
+
+static inline void start_int_mask(u8 irq)
+{
+ __raw_writel(__raw_readl(START_INT_ER_REG(irq)) &
+ ~START_INT_REG_BIT(irq), START_INT_ER_REG(irq));
+}
+
+static inline void start_int_ack(u8 irq)
+{
+ __raw_writel(START_INT_REG_BIT(irq), START_INT_RSR_REG(irq));
+}
+
+static inline void start_int_set_falling_edge(u8 irq)
+{
+ __raw_writel(__raw_readl(START_INT_APR_REG(irq)) &
+ ~START_INT_REG_BIT(irq), START_INT_APR_REG(irq));
+}
+
+static inline void start_int_set_rising_edge(u8 irq)
+{
+ __raw_writel(__raw_readl(START_INT_APR_REG(irq)) |
+ START_INT_REG_BIT(irq), START_INT_APR_REG(irq));
+}
+
#endif /* _PNX4008_GPIO_H_ */
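The start_int_* helpers combine a START_INT_REG_BIT mask with the ER (enable), RSR (raw status reset) and APR (activation polarity) registers, selected by bit 5 of the interrupt number. A typical wake-source setup, sketched under the assumption that the UART1 RX start pin should wake the system (the helper name is illustrative; the arch gpio header is assumed to be included):

/* Illustrative: arm SE_U1_RX_INT as a falling-edge wakeup source */
static void example_enable_uart1_wakeup(void)
{
	start_int_set_falling_edge(SE_U1_RX_INT);	/* program polarity in APR */
	start_int_ack(SE_U1_RX_INT);			/* clear any stale raw status via RSR */
	start_int_umask(SE_U1_RX_INT);			/* finally enable the source in ER */
}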
diff --git a/include/asm-arm/arch-pnx4008/pm.h b/include/asm-arm/arch-pnx4008/pm.h
index c660486670fb..bac1634cb3e0 100644
--- a/include/asm-arm/arch-pnx4008/pm.h
+++ b/include/asm-arm/arch-pnx4008/pm.h
@@ -29,34 +29,5 @@ extern void pnx4008_cpu_standby(void);
extern int pnx4008_startup_pll(struct clk *);
extern int pnx4008_shutdown_pll(struct clk *);
-static inline void start_int_umask(u8 irq)
-{
- __raw_writel(__raw_readl(START_INT_ER_REG(irq)) |
- START_INT_REG_BIT(irq), START_INT_ER_REG(irq));
-}
-
-static inline void start_int_mask(u8 irq)
-{
- __raw_writel(__raw_readl(START_INT_ER_REG(irq)) &
- ~START_INT_REG_BIT(irq), START_INT_ER_REG(irq));
-}
-
-static inline void start_int_ack(u8 irq)
-{
- __raw_writel(START_INT_REG_BIT(irq), START_INT_RSR_REG(irq));
-}
-
-static inline void start_int_set_falling_edge(u8 irq)
-{
- __raw_writel(__raw_readl(START_INT_APR_REG(irq)) &
- ~START_INT_REG_BIT(irq), START_INT_APR_REG(irq));
-}
-
-static inline void start_int_set_rising_edge(u8 irq)
-{
- __raw_writel(__raw_readl(START_INT_APR_REG(irq)) |
- START_INT_REG_BIT(irq), START_INT_APR_REG(irq));
-}
-
#endif /* ASSEMBLER */
#endif /* __ASM_ARCH_PNX4008_PM_H */
diff --git a/include/asm-arm/arch-s3c2410/regs-dsc.h b/include/asm-arm/arch-s3c2410/regs-dsc.h
index a023b0434efe..ba13a2c9e547 100644
--- a/include/asm-arm/arch-s3c2410/regs-dsc.h
+++ b/include/asm-arm/arch-s3c2410/regs-dsc.h
@@ -170,7 +170,7 @@
#define S3C2440_DSC1_CS1_4mA (3<<2)
#define S3C2440_DSC1_CS1_MASK (3<<2)
-#define S3C2440_DSC1_CS0 (S3C2440_SELECT_DSC1 | 0
+#define S3C2440_DSC1_CS0 (S3C2440_SELECT_DSC1 | 0)
#define S3C2440_DSC1_CS0_10mA (0<<0)
#define S3C2440_DSC1_CS0_8mA (1<<0)
#define S3C2440_DSC1_CS0_6mA (2<<0)
diff --git a/include/asm-arm/irq.h b/include/asm-arm/irq.h
index 60b5105c9c93..66e67e60bc56 100644
--- a/include/asm-arm/irq.h
+++ b/include/asm-arm/irq.h
@@ -47,10 +47,6 @@ void disable_irq_wake(unsigned int irq);
void enable_irq_wake(unsigned int irq);
int setup_irq(unsigned int, struct irqaction *);
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
extern void migrate_irqs(void);
#endif
diff --git a/include/asm-arm/thread_notify.h b/include/asm-arm/thread_notify.h
new file mode 100644
index 000000000000..8866e5216840
--- /dev/null
+++ b/include/asm-arm/thread_notify.h
@@ -0,0 +1,48 @@
+/*
+ * linux/include/asm-arm/thread_notify.h
+ *
+ * Copyright (C) 2006 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASMARM_THREAD_NOTIFY_H
+#define ASMARM_THREAD_NOTIFY_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/notifier.h>
+#include <asm/thread_info.h>
+
+static inline int thread_register_notifier(struct notifier_block *n)
+{
+ extern struct atomic_notifier_head thread_notify_head;
+ return atomic_notifier_chain_register(&thread_notify_head, n);
+}
+
+static inline void thread_unregister_notifier(struct notifier_block *n)
+{
+ extern struct atomic_notifier_head thread_notify_head;
+ atomic_notifier_chain_unregister(&thread_notify_head, n);
+}
+
+static inline void thread_notify(unsigned long rc, struct thread_info *thread)
+{
+ extern struct atomic_notifier_head thread_notify_head;
+ atomic_notifier_call_chain(&thread_notify_head, rc, thread);
+}
+
+#endif
+
+/*
+ * These are the reason codes for the thread notifier.
+ */
+#define THREAD_NOTIFY_FLUSH 0
+#define THREAD_NOTIFY_RELEASE 1
+#define THREAD_NOTIFY_SWITCH 2
+
+#endif
+#endif
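Subscribers register with thread_register_notifier() and are called back with one of the THREAD_NOTIFY_* reason codes plus the thread_info of the task in question. A minimal sketch of a subscriber (the callback name and per-notification actions are illustrative assumptions, loosely modelled on how a coprocessor driver such as VFP might react):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/thread_notify.h>

/* Illustrative notifier callback */
static int example_thread_notify(struct notifier_block *self,
				 unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/* fresh thread state: reset any per-thread coprocessor context */
		break;
	case THREAD_NOTIFY_RELEASE:
		/* thread is exiting: drop references to its context */
		break;
	case THREAD_NOTIFY_SWITCH:
		/* switching to 'thread': lazily save/restore hardware state */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_thread_notifier = {
	.notifier_call = example_thread_notify,
};

static int __init example_init(void)
{
	thread_register_notifier(&example_thread_notifier);
	return 0;
}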
diff --git a/include/asm-arm26/irq.h b/include/asm-arm26/irq.h
index 06bd5a543d13..9aaac87efba9 100644
--- a/include/asm-arm26/irq.h
+++ b/include/asm-arm26/irq.h
@@ -44,9 +44,5 @@ extern void enable_irq(unsigned int);
int set_irq_type(unsigned int irq, unsigned int type);
-int setup_irq(unsigned int, struct irqaction *);
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 9a4ff03c3969..066386ac238e 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -227,7 +227,7 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
break; \
\
default: \
- __xg_orig = 0; \
+ __xg_orig = (__typeof__(__xg_orig))0; \
asm volatile("break"); \
break; \
} \
@@ -247,7 +247,7 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
switch (sizeof(__xg_orig)) { \
case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
default: \
- __xg_orig = 0; \
+ __xg_orig = (__typeof__(__xg_orig))0; \
asm volatile("break"); \
break; \
} \
diff --git a/include/asm-frv/checksum.h b/include/asm-frv/checksum.h
index 10236f6802db..42bf0db2287a 100644
--- a/include/asm-frv/checksum.h
+++ b/include/asm-frv/checksum.h
@@ -43,7 +43,7 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
+extern unsigned int csum_partial_copy_from_user(const char __user *src, char *dst,
int len, int sum, int *csum_err);
#define csum_partial_copy_nocheck(src, dst, len, sum) \
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index cfbf7d3a1feb..e2247c22a638 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -134,7 +134,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
default:
BUG();
- return 0;
+ return NULL;
}
}
diff --git a/include/asm-frv/io.h b/include/asm-frv/io.h
index b56eba59e3cd..7765f5528894 100644
--- a/include/asm-frv/io.h
+++ b/include/asm-frv/io.h
@@ -40,13 +40,13 @@ static inline unsigned long _swapl(unsigned long v)
//#define __iormb() asm volatile("membar")
//#define __iowmb() asm volatile("membar")
-#define __raw_readb(addr) __builtin_read8((void *) (addr))
-#define __raw_readw(addr) __builtin_read16((void *) (addr))
-#define __raw_readl(addr) __builtin_read32((void *) (addr))
+#define __raw_readb __builtin_read8
+#define __raw_readw __builtin_read16
+#define __raw_readl __builtin_read32
-#define __raw_writeb(datum, addr) __builtin_write8((void *) (addr), datum)
-#define __raw_writew(datum, addr) __builtin_write16((void *) (addr), datum)
-#define __raw_writel(datum, addr) __builtin_write32((void *) (addr), datum)
+#define __raw_writeb(datum, addr) __builtin_write8(addr, datum)
+#define __raw_writew(datum, addr) __builtin_write16(addr, datum)
+#define __raw_writel(datum, addr) __builtin_write32(addr, datum)
static inline void io_outsb(unsigned int addr, const void *buf, int len)
{
@@ -116,7 +116,7 @@ static inline void memset_io(volatile void __iomem *addr, unsigned char val, int
memset((void __force *) addr, val, count);
}
-static inline void memcpy_fromio(void *dst, volatile void __iomem *src, int count)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
memcpy(dst, (void __force *) src, count);
}
@@ -128,12 +128,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
static inline uint8_t inb(unsigned long addr)
{
- return __builtin_read8((void *)addr);
+ return __builtin_read8((void __iomem *)addr);
}
static inline uint16_t inw(unsigned long addr)
{
- uint16_t ret = __builtin_read16((void *)addr);
+ uint16_t ret = __builtin_read16((void __iomem *)addr);
if (__is_PCI_IO(addr))
ret = _swapw(ret);
@@ -143,7 +143,7 @@ static inline uint16_t inw(unsigned long addr)
static inline uint32_t inl(unsigned long addr)
{
- uint32_t ret = __builtin_read32((void *)addr);
+ uint32_t ret = __builtin_read32((void __iomem *)addr);
if (__is_PCI_IO(addr))
ret = _swapl(ret);
@@ -153,21 +153,21 @@ static inline uint32_t inl(unsigned long addr)
static inline void outb(uint8_t datum, unsigned long addr)
{
- __builtin_write8((void *)addr, datum);
+ __builtin_write8((void __iomem *)addr, datum);
}
static inline void outw(uint16_t datum, unsigned long addr)
{
if (__is_PCI_IO(addr))
datum = _swapw(datum);
- __builtin_write16((void *)addr, datum);
+ __builtin_write16((void __iomem *)addr, datum);
}
static inline void outl(uint32_t datum, unsigned long addr)
{
if (__is_PCI_IO(addr))
datum = _swapl(datum);
- __builtin_write32((void *)addr, datum);
+ __builtin_write32((void __iomem *)addr, datum);
}
#define inb_p(addr) inb(addr)
@@ -189,12 +189,12 @@ static inline void outl(uint32_t datum, unsigned long addr)
static inline uint8_t readb(const volatile void __iomem *addr)
{
- return __builtin_read8((volatile uint8_t __force *) addr);
+ return __builtin_read8((__force void volatile __iomem *) addr);
}
static inline uint16_t readw(const volatile void __iomem *addr)
{
- uint16_t ret = __builtin_read16((volatile uint16_t __force *)addr);
+ uint16_t ret = __builtin_read16((__force void volatile __iomem *)addr);
if (__is_PCI_MEM(addr))
ret = _swapw(ret);
@@ -203,7 +203,7 @@ static inline uint16_t readw(const volatile void __iomem *addr)
static inline uint32_t readl(const volatile void __iomem *addr)
{
- uint32_t ret = __builtin_read32((volatile uint32_t __force *)addr);
+ uint32_t ret = __builtin_read32((__force void volatile __iomem *)addr);
if (__is_PCI_MEM(addr))
ret = _swapl(ret);
@@ -217,7 +217,7 @@ static inline uint32_t readl(const volatile void __iomem *addr)
static inline void writeb(uint8_t datum, volatile void __iomem *addr)
{
- __builtin_write8((volatile uint8_t __force *) addr, datum);
+ __builtin_write8(addr, datum);
if (__is_PCI_MEM(addr))
__flush_PCI_writes();
}
@@ -227,7 +227,7 @@ static inline void writew(uint16_t datum, volatile void __iomem *addr)
if (__is_PCI_MEM(addr))
datum = _swapw(datum);
- __builtin_write16((volatile uint16_t __force *) addr, datum);
+ __builtin_write16(addr, datum);
if (__is_PCI_MEM(addr))
__flush_PCI_writes();
}
@@ -237,7 +237,7 @@ static inline void writel(uint32_t datum, volatile void __iomem *addr)
if (__is_PCI_MEM(addr))
datum = _swapl(datum);
- __builtin_write32((volatile uint32_t __force *) addr, datum);
+ __builtin_write32(addr, datum);
if (__is_PCI_MEM(addr))
__flush_PCI_writes();
}
@@ -271,7 +271,7 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr, unsigned l
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
-extern void iounmap(void __iomem *addr);
+extern void iounmap(void volatile __iomem *addr);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
diff --git a/include/asm-frv/mb-regs.h b/include/asm-frv/mb-regs.h
index 93fa732fb0cd..219e5f926f18 100644
--- a/include/asm-frv/mb-regs.h
+++ b/include/asm-frv/mb-regs.h
@@ -16,6 +16,17 @@
#include <asm/sections.h>
#include <asm/mem-layout.h>
+#ifndef __ASSEMBLY__
+/* gcc builtins, annotated */
+
+unsigned long __builtin_read8(volatile void __iomem *);
+unsigned long __builtin_read16(volatile void __iomem *);
+unsigned long __builtin_read32(volatile void __iomem *);
+void __builtin_write8(volatile void __iomem *, unsigned char);
+void __builtin_write16(volatile void __iomem *, unsigned short);
+void __builtin_write32(volatile void __iomem *, unsigned long);
+#endif
+
#define __region_IO KERNEL_IO_START /* the region from 0xe0000000 to 0xffffffff has suitable
* protection laid over the top for use in memory-mapped
* I/O
@@ -59,7 +70,7 @@
#define __region_PCI_MEM (__region_CS2 + 0x08000000UL)
#define __flush_PCI_writes() \
do { \
- __builtin_write8((volatile void *) __region_PCI_MEM, 0); \
+ __builtin_write8((volatile void __iomem *) __region_PCI_MEM, 0); \
} while(0)
#define __is_PCI_IO(addr) \
@@ -83,15 +94,15 @@ extern int __nongprelbss mb93090_mb00_detected;
#define __set_LEDS(X) \
do { \
if (mb93090_mb00_detected) \
- __builtin_write32((void *) __addr_LEDS(), ~(X)); \
+ __builtin_write32((void __iomem *) __addr_LEDS(), ~(X)); \
} while (0)
#else
#define __set_LEDS(X)
#endif
#define __addr_LCD() (__region_CS2 + 0x01200008UL)
-#define __get_LCD(B) __builtin_read32((volatile void *) (B))
-#define __set_LCD(B,X) __builtin_write32((volatile void *) (B), (X))
+#define __get_LCD(B) __builtin_read32((volatile void __iomem *) (B))
+#define __set_LCD(B,X) __builtin_write32((volatile void __iomem *) (B), (X))
#define LCD_D 0x000000ff /* LCD data bus */
#define LCD_RW 0x00000100 /* LCD R/W signal */
@@ -161,11 +172,11 @@ do { \
#define __get_CLKIN() 66000000UL
#define __addr_LEDS() (__region_CS2 + 0x00000023UL)
-#define __set_LEDS(X) __builtin_write8((volatile void *) __addr_LEDS(), (X))
+#define __set_LEDS(X) __builtin_write8((volatile void __iomem *) __addr_LEDS(), (X))
#define __addr_FPGATR() (__region_CS2 + 0x00000030UL)
-#define __set_FPGATR(X) __builtin_write32((volatile void *) __addr_FPGATR(), (X))
-#define __get_FPGATR() __builtin_read32((volatile void *) __addr_FPGATR())
+#define __set_FPGATR(X) __builtin_write32((volatile void __iomem *) __addr_FPGATR(), (X))
+#define __get_FPGATR() __builtin_read32((volatile void __iomem *) __addr_FPGATR())
#define MB93093_FPGA_FPGATR_AUDIO_CLK 0x00000003
@@ -180,7 +191,7 @@ do { \
#define MB93093_FPGA_SWR_PUSHSWMASK (0x1F<<26)
#define MB93093_FPGA_SWR_PUSHSW4 (1<<29)
-#define __addr_FPGA_SWR ((volatile void *)(__region_CS2 + 0x28UL))
+#define __addr_FPGA_SWR ((volatile void __iomem *)(__region_CS2 + 0x28UL))
#define __get_FPGA_PUSHSW1_5() (__builtin_read32(__addr_FPGA_SWR) & MB93093_FPGA_SWR_PUSHSWMASK)
diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h
index 67366894780f..dcc1b3592918 100644
--- a/include/asm-frv/signal.h
+++ b/include/asm-frv/signal.h
@@ -114,13 +114,13 @@ struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
@@ -146,7 +146,7 @@ struct sigaction {
#endif /* __KERNEL__ */
typedef struct sigaltstack {
- void *ss_sp;
+ void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
diff --git a/include/asm-frv/uaccess.h b/include/asm-frv/uaccess.h
index a1d140438863..3d90e1018ee2 100644
--- a/include/asm-frv/uaccess.h
+++ b/include/asm-frv/uaccess.h
@@ -22,7 +22,7 @@
#define HAVE_ARCH_UNMAPPED_AREA /* we decide where to put mmaps */
-#define __ptr(x) ((unsigned long *)(x))
+#define __ptr(x) ((unsigned long __force *)(x))
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -64,7 +64,7 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
#define __range_ok(addr,size) ___range_ok((unsigned long) (addr), (unsigned long) (size))
-#define access_ok(type,addr,size) (__range_ok((addr), (size)) == 0)
+#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
/*
@@ -97,6 +97,7 @@ extern unsigned long search_exception_table(unsigned long);
int __pu_err = 0; \
\
typeof(*(ptr)) __pu_val = (x); \
+ __chk_user_ptr(ptr); \
\
switch (sizeof (*(ptr))) { \
case 1: \
@@ -120,7 +121,7 @@ extern unsigned long search_exception_table(unsigned long);
#define put_user(x, ptr) \
({ \
- typeof(&*ptr) _p = (ptr); \
+ typeof(*(ptr)) __user *_p = (ptr); \
int _e; \
\
_e = __range_ok(_p, sizeof(*_p)); \
@@ -175,33 +176,44 @@ do { \
*/
#define __get_user(x, ptr) \
({ \
- typeof(*(ptr)) __gu_val = 0; \
int __gu_err = 0; \
+ __chk_user_ptr(ptr); \
\
switch (sizeof(*(ptr))) { \
- case 1: \
- __get_user_asm(__gu_err, *(u8*)&__gu_val, ptr, "ub", "=r"); \
+ case 1: { \
+ unsigned char __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "ub", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
- case 2: \
- __get_user_asm(__gu_err, *(u16*)&__gu_val, ptr, "uh", "=r"); \
+ } \
+ case 2: { \
+ unsigned short __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "uh", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
- case 4: \
- __get_user_asm(__gu_err, *(u32*)&__gu_val, ptr, "", "=r"); \
+ } \
+ case 4: { \
+ unsigned int __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
- case 8: \
- __get_user_asm(__gu_err, *(u64*)&__gu_val, ptr, "d", "=e"); \
+ } \
+ case 8: { \
+ unsigned long long __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "d", "=e"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
+ } \
default: \
__gu_err = __get_user_bad(); \
break; \
} \
- (x) = __gu_val; \
__gu_err; \
})
#define get_user(x, ptr) \
({ \
- typeof(&*ptr) _p = (ptr); \
+ const typeof(*(ptr)) __user *_p = (ptr);\
int _e; \
\
_e = __range_ok(_p, sizeof(*_p)); \
@@ -248,19 +260,20 @@ do { \
/*
*
*/
+#define ____force(x) (__force void *)(void __user *)(x)
#ifdef CONFIG_MMU
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);
-#define clear_user(dst,count) __memset_user((dst), (count))
-#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), (from), (n))
-#define __copy_to_user_inatomic(to, from, n) __memcpy_user((to), (from), (n))
+#define clear_user(dst,count) __memset_user(____force(dst), (count))
+#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
+#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
#else
-#define clear_user(dst,count) (memset((dst), 0, (count)), 0)
-#define __copy_from_user_inatomic(to, from, n) (memcpy((to), (from), (n)), 0)
-#define __copy_to_user_inatomic(to, from, n) (memcpy((to), (from), (n)), 0)
+#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
+#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
+#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
#endif
@@ -278,7 +291,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
return __copy_from_user_inatomic(to, from, n);
}
-static inline long copy_from_user(void *to, const void *from, unsigned long n)
+static inline long copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long ret = n;
@@ -291,16 +304,13 @@ static inline long copy_from_user(void *to, const void *from, unsigned long n)
return ret;
}
-static inline long copy_to_user(void *to, const void *from, unsigned long n)
+static inline long copy_to_user(void __user *to, const void *from, unsigned long n)
{
return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n;
}
-#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
-#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
-
-extern long strncpy_from_user(char *dst, const char *src, long count);
-extern long strnlen_user(const char *src, long count);
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern long strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
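The reworked __get_user() above now reads into a correctly sized unsigned temporary for each access width and force-casts the result back to the target type, which keeps sparse happy about the __user annotations without changing how callers look. A short hedged usage sketch (the helper and variable names are invented; <asm/uaccess.h> and <linux/errno.h> are assumed):

/* Illustrative caller: fetch a single int from user space */
static int example_fetch_flag(const int __user *uptr, int *out)
{
	int val;

	if (get_user(val, uptr))	/* range check plus sized __get_user_asm access */
		return -EFAULT;

	*out = val;
	return 0;
}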
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 7c2e712c3b73..b80dbd839475 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -306,7 +306,7 @@
#define __NR_mknodat 297
#define __NR_fchownat 298
#define __NR_futimesat 299
-#define __NR_newfstatat 300
+#define __NR_fstatat64 300
#define __NR_unlinkat 301
#define __NR_renameat 302
#define __NR_linkat 303
@@ -460,24 +460,7 @@ type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg
* some others too.
*/
#define __NR__exit __NR_exit
-static inline _syscall0(int,pause)
-static inline _syscall0(int,sync)
-static inline _syscall0(pid_t,setsid)
-static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
-static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
-static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
-static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
-static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
-static inline _syscall1(int,close,int,fd)
-static inline _syscall1(int,_exit,int,exitcode)
-static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
-static inline _syscall1(int,delete_module,const char *,name)
-
-static inline pid_t wait(int * wait_stat)
-{
- return waitpid(-1,wait_stat,0);
-}
#endif /* __KERNEL_SYSCALLS__ */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 0cfb086dd373..8078cbd2c016 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -23,29 +23,23 @@
#endif /* CONFIG_DISCONTIGMEM */
-#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-struct page;
-/* this is useful when inlined pfn_to_page is too big */
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
-#else
/*
* supports 3 memory models.
*/
#if defined(CONFIG_FLATMEM)
-#define pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
+#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)
-#define pfn_to_page(pfn) \
+#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
unsigned long __nid = arch_pfn_to_nid(pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
-#define page_to_pfn(pg) \
+#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
(unsigned long)(__pg - __pgdat->node_mem_map) + \
@@ -57,18 +51,27 @@ extern unsigned long page_to_pfn(struct page *page);
 * Note: section's mem_map is encoded to reflect its start_pfn.
* section[i].section_mem_map == mem_map's address - start_pfn;
*/
-#define page_to_pfn(pg) \
+#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
int __sec = page_to_section(__pg); \
__pg - __section_mem_map_addr(__nr_to_section(__sec)); \
})
-#define pfn_to_page(pfn) \
+#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+
+#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
+struct page;
+/* this is useful when inlined pfn_to_page is too big */
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+#else
+#define page_to_pfn __page_to_pfn
+#define pfn_to_page __pfn_to_page
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
#endif /* __ASSEMBLY__ */
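With the out-of-line case now layered on top of the always-present __pfn_to_page()/__page_to_pfn() macros, an architecture that selects CONFIG_OUT_OF_LINE_PFN_TO_PAGE can provide the two symbols as trivial wrappers. A hedged sketch of what such out-of-line definitions could look like (file placement and the EXPORT_SYMBOL choice are assumptions):

/* Illustrative out-of-line definitions built on the double-underscore macros */
#include <linux/mm.h>
#include <linux/module.h>

struct page *pfn_to_page(unsigned long pfn)
{
	return __pfn_to_page(pfn);	/* expands per FLATMEM/DISCONTIGMEM/SPARSEMEM */
}
EXPORT_SYMBOL(pfn_to_page);

unsigned long page_to_pfn(struct page *page)
{
	return __page_to_pfn(page);
}
EXPORT_SYMBOL(page_to_pfn);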
diff --git a/include/asm-h8300/irq.h b/include/asm-h8300/irq.h
index 73065f5bda0e..42a3ac424a9e 100644
--- a/include/asm-h8300/irq.h
+++ b/include/asm-h8300/irq.h
@@ -63,8 +63,4 @@ extern void enable_irq(unsigned int);
extern void disable_irq(unsigned int);
#define disable_irq_nosync(x) disable_irq(x)
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* _H8300_IRQ_H_ */
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index e201decea0c9..d79e9ee10fd7 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -3,6 +3,8 @@
#ifdef __KERNEL__
+#include <asm/types.h>
+
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index cc9b940fb7e8..1d8362cb2c5d 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -138,8 +138,6 @@ void switch_ipi_to_APIC_timer(void *cpumask);
extern int timer_over_8254;
-extern int modern_apic(void);
-
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
index 5e4a35af2921..9f6995341fdc 100644
--- a/include/asm-i386/apicdef.h
+++ b/include/asm-i386/apicdef.h
@@ -121,7 +121,6 @@
*/
#define u32 unsigned int
-#define lapic ((volatile struct local_apic *)APIC_BASE)
struct local_apic {
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index b44bfc6239cb..3ecedbafa8ce 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -88,6 +88,12 @@
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
+#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
@@ -121,6 +127,12 @@
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
+#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
+#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
+#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
+#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
+#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
+#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#endif /* __ASM_I386_CPUFEATURE_H */
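
The cpufeature.h hunk above adds VIA PadLock bits in word 5; each X86_FEATURE_* value encodes (word * 32 + bit) into one flat capability bitmap. A stand-alone sketch of how such a value indexes the bitmap; the CPUID pretence is invented for the example and test_bit() here is plain C, not the kernel helper.

#include <stdio.h>

#define NCAPINTS	7
#define X86_FEATURE_PHE	(5*32 + 10)	/* PadLock Hash Engine */
#define X86_FEATURE_PMM	(5*32 + 12)	/* PadLock Montgomery Multiplier */

static unsigned int x86_capability[NCAPINTS];

static int test_bit(int nr, const unsigned int *addr)
{
	return (addr[nr / 32] >> (nr % 32)) & 1;
}

#define boot_cpu_has(bit) test_bit((bit), x86_capability)

int main(void)
{
	/* pretend the PadLock CPUID leaf reported PHE but not PMM */
	x86_capability[5] |= 1u << 10;

	printf("cpu_has_phe=%d cpu_has_pmm=%d\n",
	       boot_cpu_has(X86_FEATURE_PHE), boot_cpu_has(X86_FEATURE_PMM));
	return 0;
}
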
diff --git a/include/asm-i386/mce.h b/include/asm-i386/mce.h
new file mode 100644
index 000000000000..7cc1a973bf00
--- /dev/null
+++ b/include/asm-i386/mce.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_MCE
+extern void mcheck_init(struct cpuinfo_x86 *c);
+#else
+#define mcheck_init(c) do {} while(0)
+#endif
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 5a46de08efea..07f063ae26ea 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -76,6 +76,8 @@ extern int mtrr_add_page (unsigned long base, unsigned long size,
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
# else
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment)
@@ -100,6 +102,8 @@ static __inline__ int mtrr_del_page (int reg, unsigned long base,
static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
# endif
#endif
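
The mce.h and mtrr.h hunks above move declarations out of processor.h while keeping the usual pattern: a real prototype when the option is configured, a do-nothing macro stub otherwise, so call sites stay free of #ifdefs. A stand-alone illustration with a made-up CONFIG_FEATURE_FOO option:

#include <stdio.h>

#define CONFIG_FEATURE_FOO 1	/* comment out to take the stub path */

#ifdef CONFIG_FEATURE_FOO
static void foo_bp_init(void) { printf("foo initialised on the boot CPU\n"); }
#else
#define foo_bp_init() do { } while (0)	/* compiles away entirely */
#endif

int main(void)
{
	foo_bp_init();	/* identical call site either way */
	return 0;
}
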
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 4df3818e4122..0c83cf12eec9 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -728,18 +728,4 @@ extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
-#ifdef CONFIG_MTRR
-extern void mtrr_ap_init(void);
-extern void mtrr_bp_init(void);
-#else
-#define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
-#endif
-
-#ifdef CONFIG_X86_MCE
-extern void mcheck_init(struct cpuinfo_x86 *c);
-#else
-#define mcheck_init(c) do {} while(0)
-#endif
-
#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 1ec65523ea5e..8462f8e0e658 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -58,7 +58,7 @@ extern struct movsl_mask {
__chk_user_ptr(addr); \
asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
:"=&r" (flag), "=r" (sum) \
- :"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+ :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
flag; })
/**
@@ -390,6 +390,8 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll(void *to,
const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+ const void __user *from, unsigned long n);
/*
* Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -478,12 +480,43 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return __copy_from_user_ll(to, from, n);
}
+#define ARCH_HAS_NOCACHE_UACCESS
+
+static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret, 1);
+ return ret;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret, 2);
+ return ret;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret, 4);
+ return ret;
+ }
+ }
+ return __copy_from_user_ll_nocache(to, from, n);
+}
+
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
return __copy_from_user_inatomic(to, from, n);
}
+
+static __always_inline unsigned long
+__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
+{
+ might_sleep();
+ return __copy_from_user_inatomic_nocache(to, from, n);
+}
+
unsigned long __must_check copy_to_user(void __user *to,
const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
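
The uaccess.h hunk above adds __copy_from_user_inatomic_nocache(), which inlines 1-, 2- and 4-byte copies when the length is a compile-time constant and otherwise calls the out-of-line __copy_from_user_ll_nocache(). A user-space sketch of that constant-size dispatch; memcpy() stands in for the kernel helpers and the function names are invented for the example.

#include <stdio.h>
#include <string.h>

/* stands in for __copy_from_user_ll_nocache(); returns bytes not copied */
static unsigned long generic_copy(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

static inline unsigned long
copy_small_nocache(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1: *(unsigned char  *)to = *(const unsigned char  *)from; return 0;
		case 2: *(unsigned short *)to = *(const unsigned short *)from; return 0;
		case 4: *(unsigned int   *)to = *(const unsigned int   *)from; return 0;
		}
	}
	return generic_copy(to, from, n);
}

int main(void)
{
	unsigned int src = 0xdeadbeef, dst = 0;

	/* sizeof(dst) is a compile-time constant 4, so the 4-byte path is taken */
	copy_small_nocache(&dst, &src, sizeof(dst));
	printf("copied 0x%x\n", dst);
	return 0;
}
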
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index de2ccc149e34..fc1c8ddae149 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -322,10 +322,11 @@
#define __NR_sync_file_range 314
#define __NR_tee 315
#define __NR_vmsplice 316
+#define __NR_move_pages 317
#ifdef __KERNEL__
-#define NR_syscalls 317
+#define NR_syscalls 318
/*
* user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index c2e3742108bb..781ee2c7e8c3 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -88,6 +88,7 @@ phys_to_virt (unsigned long address)
}
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count);
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 9c5389b7e623..ee97f7c2d462 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -69,14 +69,16 @@ typedef struct ia64_mc_info_s {
*/
struct ia64_sal_os_state {
- /* SAL to OS, must be at offset 0 */
+
+ /* SAL to OS */
u64 os_gp; /* GP of the os registered with the SAL, physical */
u64 pal_proc; /* PAL_PROC entry point, physical */
u64 sal_proc; /* SAL_PROC entry point, physical */
u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */
u64 proc_state_param; /* from R18 */
u64 monarch; /* 1 for a monarch event, 0 for a slave */
- /* common, must follow SAL to OS */
+
+ /* common */
u64 sal_ra; /* Return address in SAL, physical */
u64 sal_gp; /* GP of the SAL - physical */
pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */
@@ -98,7 +100,8 @@ struct ia64_sal_os_state {
u64 iipa;
u64 iim;
u64 iha;
- /* OS to SAL, must follow common */
+
+ /* OS to SAL */
u64 os_status; /* OS status to SAL, enum below */
u64 context; /* 0 if return to same context
1 if return to new context */
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index eaac08d5e0bd..228981cadf8f 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -316,22 +316,20 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_mkhuge(pte) (__pte(pte_val(pte)))
/*
- * Macro to a page protection value as "uncacheable". Note that "protection" is really a
- * misnomer here as the protection value contains the memory attribute bits, dirty bits,
- * and various other bits as well.
+ * Make page protection values cacheable, uncacheable, or write-
+ * combining. Note that "protection" is really a misnomer here as the
+ * protection value contains the memory attribute bits, dirty bits, and
+ * various other bits as well.
*/
+#define pgprot_cacheable(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
-
-/*
- * Macro to make mark a page protection value as "write-combining".
- * Note that "protection" is really a misnomer here as the protection
- * value contains the memory attribute bits, dirty bits, and various
- * other bits as well. Accesses through a write-combining translation
- * works bypasses the caches, but does allow for consecutive writes to
- * be combined into single (but larger) write transactions.
- */
#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
static inline unsigned long
pgd_index (unsigned long address)
{
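
The pgtable.h hunk above adds pgprot_cacheable() alongside pgprot_noncached() and pgprot_writecombine(); all three follow the same pattern of clearing the memory-attribute field of a protection value and OR-ing in the wanted attribute. A stand-alone C sketch of that pattern; the bit values are illustrative, not taken from the ia64 headers.

#include <stdio.h>

#define _PAGE_MA_MASK	(0x7UL << 2)	/* memory-attribute field (illustrative) */
#define _PAGE_MA_WB	(0x0UL << 2)	/* cacheable, write-back */
#define _PAGE_MA_UC	(0x4UL << 2)	/* uncacheable */
#define _PAGE_MA_WC	(0x6UL << 2)	/* write-combining */

static unsigned long set_mem_attr(unsigned long prot, unsigned long attr)
{
	return (prot & ~_PAGE_MA_MASK) | attr;
}

int main(void)
{
	unsigned long prot = 0x1UL | _PAGE_MA_UC;	/* some permission bit + UC */

	printf("cacheable=%#lx noncached=%#lx writecombine=%#lx\n",
	       set_mem_attr(prot, _PAGE_MA_WB),
	       set_mem_attr(prot, _PAGE_MA_UC),
	       set_mem_attr(prot, _PAGE_MA_WC));
	return 0;
}
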
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 8c865e43f609..cd490b20d592 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -345,7 +345,7 @@ ia64_sn_plat_set_error_handling_features(void)
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES,
- (SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV | SAL_ERR_FEAT_LOG_SBES),
+ SAL_ERR_FEAT_LOG_SBES,
0, 0, 0, 0, 0, 0);
return ret_stuff.status;
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 632f2eedf72c..bb0eb727dcd0 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -265,7 +265,7 @@
#define __NR_keyctl 1273
#define __NR_ioprio_set 1274
#define __NR_ioprio_get 1275
-/* 1276 is available for reuse (was briefly sys_set_zone_reclaim) */
+#define __NR_move_pages 1276
#define __NR_inotify_init 1277
#define __NR_inotify_add_watch 1278
#define __NR_inotify_rm_watch 1279
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index b4f48b2a6a57..9727ca9d9f26 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -130,8 +130,4 @@ extern volatile unsigned int num_spurious;
*/
extern irq_node_t *new_irq_node(void);
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h
index 352799e71f08..8455f778b601 100644
--- a/include/asm-m68k/processor.h
+++ b/include/asm-m68k/processor.h
@@ -71,10 +71,10 @@ struct thread_struct {
};
#define INIT_THREAD { \
- ksp: sizeof(init_stack) + (unsigned long) init_stack, \
- sr: PS_S, \
- fs: __KERNEL_DS, \
- info: INIT_THREAD_INFO(init_task) \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+ .sr = PS_S, \
+ .fs = __KERNEL_DS, \
+ .info = INIT_THREAD_INFO(init_task), \
}
/*
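
The INIT_THREAD hunk above converts old GCC-specific "field: value" initializers to standard C99 designated initializers. A self-contained illustration of the portable form, using a cut-down thread_struct and values invented for the example:

#include <stdio.h>

struct thread_struct {
	unsigned long ksp;
	unsigned short sr;
	unsigned long fs;
};

/* C99 form: order-independent, unnamed members are zero-initialised */
static struct thread_struct init_thread = {
	.ksp = 0x1000,
	.sr  = 0x2700,
	.fs  = 0x68,
};

int main(void)
{
	printf("ksp=%#lx sr=%#x fs=%#lx\n",
	       init_thread.ksp, init_thread.sr, init_thread.fs);
	return 0;
}
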
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index 2ffd87b0a769..b761ef218cea 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -4,8 +4,9 @@
/*
* User space memory access functions
*/
+#include <linux/compiler.h>
#include <linux/errno.h>
-#include <linux/sched.h>
+#include <linux/types.h>
#include <asm/segment.h>
#define VERIFY_READ 0
@@ -32,858 +33,315 @@ struct exception_table_entry
unsigned long insn, fixup;
};
+extern int __put_user_bad(void);
+extern int __get_user_bad(void);
+
+#define __put_user_asm(res, x, ptr, bwl, reg, err) \
+asm volatile ("\n" \
+ "1: moves."#bwl" %2,%1\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: moveq.l %3,%0\n" \
+ " jra 2b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=m" (*(ptr)) \
+ : #reg (x), "i" (err))
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
-#define put_user(x, ptr) \
-({ \
- int __pu_err; \
- typeof(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
- switch (sizeof (*(ptr))) { \
- case 1: \
- __put_user_asm(__pu_err, __pu_val, ptr, b); \
- break; \
- case 2: \
- __put_user_asm(__pu_err, __pu_val, ptr, w); \
- break; \
- case 4: \
- __put_user_asm(__pu_err, __pu_val, ptr, l); \
- break; \
- case 8: \
- __pu_err = __constant_copy_to_user(ptr, &__pu_val, 8); \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
- } \
- __pu_err; \
+#define __put_user(x, ptr) \
+({ \
+ typeof(*(ptr)) __pu_val = (x); \
+ int __pu_err = 0; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
+ break; \
+ case 4: \
+ __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+ break; \
+ case 8: \
+ { \
+ const void *__pu_ptr = (ptr); \
+ asm volatile ("\n" \
+ "1: moves.l %2,(%1)+\n" \
+ "2: moves.l %R2,(%1)\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: movel %3,%0\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .long 3b,10b\n" \
+ " .previous" \
+ : "+d" (__pu_err), "+a" (__pu_ptr) \
+ : "r" (__pu_val), "i" (-EFAULT) \
+ : "memory"); \
+ break; \
+ } \
+ default: \
+ __pu_err = __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
})
-#define __put_user(x, ptr) put_user(x, ptr)
+#define put_user(x, ptr) __put_user(x, ptr)
-extern int __put_user_bad(void);
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(err,x,ptr,bwl) \
-__asm__ __volatile__ \
- ("21:moves" #bwl " %2,%1\n" \
- "1:\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "2: movel %3,%0\n" \
- " jra 1b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 21b,2b\n" \
- " .long 1b,2b\n" \
- ".previous" \
- : "=d"(err) \
- : "m"(*(ptr)), "r"(x), "i"(-EFAULT), "0"(0))
-
-#define get_user(x, ptr) \
-({ \
- int __gu_err; \
- typeof(*(ptr)) __gu_val; \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __get_user_asm(__gu_err, __gu_val, ptr, b, "=d"); \
- break; \
- case 2: \
- __get_user_asm(__gu_err, __gu_val, ptr, w, "=r"); \
- break; \
- case 4: \
- __get_user_asm(__gu_err, __gu_val, ptr, l, "=r"); \
- break; \
- case 8: \
- __gu_err = __constant_copy_from_user(&__gu_val, ptr, 8); \
- break; \
- default: \
- __gu_val = (typeof(*(ptr)))0; \
- __gu_err = __get_user_bad(); \
- break; \
- } \
- (x) = __gu_val; \
- __gu_err; \
+#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
+ type __gu_val; \
+ asm volatile ("\n" \
+ "1: moves."#bwl" %2,%1\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " sub."#bwl" %1,%1\n" \
+ " jra 2b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=&" #reg (__gu_val) \
+ : "m" (*(ptr)), "i" (err)); \
+ (x) = (typeof(*(ptr)))(long)__gu_val; \
})
-#define __get_user(x, ptr) get_user(x, ptr)
-extern int __get_user_bad(void);
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = 0; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
+ break; \
+ case 4: \
+ __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
+ break; \
+/* case 8: disabled because gcc-4.1 has a broken typeof \
+ { \
+ const void *__gu_ptr = (ptr); \
+ u64 __gu_val; \
+ asm volatile ("\n" \
+ "1: moves.l (%2)+,%1\n" \
+ "2: moves.l (%2),%R1\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " sub.l %1,%1\n" \
+ " sub.l %R1,%R1\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (__gu_err), "=&r" (__gu_val), \
+ "+a" (__gu_ptr) \
+ : "i" (-EFAULT) \
+ : "memory"); \
+ (x) = (typeof(*(ptr)))__gu_val; \
+ break; \
+ } */ \
+ default: \
+ __gu_err = __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+#define get_user(x, ptr) __get_user(x, ptr)
-#define __get_user_asm(err,x,ptr,bwl,reg) \
-__asm__ __volatile__ \
- ("1: moves" #bwl " %2,%1\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "3: movel %3,%0\n" \
- " sub" #bwl " %1,%1\n" \
- " jra 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=d"(err), reg(x) \
- : "m"(*(ptr)), "i" (-EFAULT), "0"(0))
+unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
+unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned long tmp;
- __asm__ __volatile__
- (" tstl %2\n"
- " jeq 2f\n"
- "1: movesl (%1)+,%3\n"
- " movel %3,(%0)+\n"
- " subql #1,%2\n"
- " jne 1b\n"
- "2: movel %4,%2\n"
- " bclr #1,%2\n"
- " jeq 4f\n"
- "3: movesw (%1)+,%3\n"
- " movew %3,(%0)+\n"
- "4: bclr #0,%2\n"
- " jeq 6f\n"
- "5: movesb (%1)+,%3\n"
- " moveb %3,(%0)+\n"
- "6:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "7: movel %2,%%d0\n"
- "71:clrl (%0)+\n"
- " subql #1,%%d0\n"
- " jne 71b\n"
- " lsll #2,%2\n"
- " addl %4,%2\n"
- " btst #1,%4\n"
- " jne 81f\n"
- " btst #0,%4\n"
- " jne 91f\n"
- " jra 6b\n"
- "8: addql #2,%2\n"
- "81:clrw (%0)+\n"
- " btst #0,%4\n"
- " jne 91f\n"
- " jra 6b\n"
- "9: addql #1,%2\n"
- "91:clrb (%0)+\n"
- " jra 6b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,7b\n"
- " .long 3b,8b\n"
- " .long 5b,9b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
- : "d"(n & 3), "0"(to), "1"(from), "2"(n/4)
- : "d0", "memory");
- return n;
-}
-
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+static __always_inline unsigned long
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long tmp;
- __asm__ __volatile__
- (" tstl %2\n"
- " jeq 3f\n"
- "1: movel (%1)+,%3\n"
- "22:movesl %3,(%0)+\n"
- "2: subql #1,%2\n"
- " jne 1b\n"
- "3: movel %4,%2\n"
- " bclr #1,%2\n"
- " jeq 4f\n"
- " movew (%1)+,%3\n"
- "24:movesw %3,(%0)+\n"
- "4: bclr #0,%2\n"
- " jeq 5f\n"
- " moveb (%1)+,%3\n"
- "25:movesb %3,(%0)+\n"
- "5:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "60:addql #1,%2\n"
- "6: lsll #2,%2\n"
- " addl %4,%2\n"
- " jra 5b\n"
- "7: addql #2,%2\n"
- " jra 5b\n"
- "8: addql #1,%2\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,60b\n"
- " .long 22b,6b\n"
- " .long 2b,6b\n"
- " .long 24b,7b\n"
- " .long 3b,60b\n"
- " .long 4b,7b\n"
- " .long 25b,8b\n"
- " .long 5b,8b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
- : "r"(n & 3), "0"(to), "1"(from), "2"(n / 4)
- : "memory");
- return n;
-}
+ unsigned long res = 0, tmp;
-#define __copy_from_user_big(to, from, n, fixup, copy) \
- __asm__ __volatile__ \
- ("10: movesl (%1)+,%%d0\n" \
- " movel %%d0,(%0)+\n" \
- " subql #1,%2\n" \
- " jne 10b\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "11: movel %2,%%d0\n" \
- "13: clrl (%0)+\n" \
- " subql #1,%%d0\n" \
- " jne 13b\n" \
- " lsll #2,%2\n" \
- fixup "\n" \
- " jra 12f\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 10b,11b\n" \
- ".previous\n" \
- copy "\n" \
- "12:" \
- : "=a"(to), "=a"(from), "=d"(n) \
- : "0"(to), "1"(from), "2"(n/4) \
- : "d0", "memory")
+ /* limit the inlined version to 3 moves */
+ if (n == 11 || n > 12)
+ return __generic_copy_from_user(to, from, n);
-static inline unsigned long
-__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- switch (n) {
- case 0:
- break;
- case 1:
- __asm__ __volatile__
- ("1: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #1,%2\n"
- " clrb (%0)+\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("1: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #2,%2\n"
- " clrw (%0)+\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 3:
- __asm__ __volatile__
- ("1: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- "2: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- "3:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: addql #2,%2\n"
- " clrw (%0)+\n"
- "5: addql #1,%2\n"
- " clrb (%0)+\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,4b\n"
- " .long 2b,5b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 8:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "3:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: addql #4,%2\n"
- " clrl (%0)+\n"
- "5: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,4b\n"
- " .long 2b,5b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 12:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "3: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "4:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "5: addql #4,%2\n"
- " clrl (%0)+\n"
- "6: addql #4,%2\n"
- " clrl (%0)+\n"
- "7: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 4b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,5b\n"
- " .long 2b,6b\n"
- " .long 3b,7b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 16:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "3: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "4: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "5:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "6: addql #4,%2\n"
- " clrl (%0)+\n"
- "7: addql #4,%2\n"
- " clrl (%0)+\n"
- "8: addql #4,%2\n"
- " clrl (%0)+\n"
- "9: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,6b\n"
- " .long 2b,7b\n"
- " .long 3b,8b\n"
- " .long 4b,9b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- default:
- switch (n & 3) {
- case 0:
- __copy_from_user_big(to, from, n, "", "");
- break;
+ switch (n) {
case 1:
- __copy_from_user_big(to, from, n,
- /* fixup */
- "1: addql #1,%2\n"
- " clrb (%0)+",
- /* copy */
- "2: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- ".section __ex_table,\"a\"\n"
- " .long 2b,1b\n"
- ".previous");
- break;
+ __get_user_asm(res, *(u8 *)to, (u8 *)from, u8, b, d, 1);
+ return res;
case 2:
- __copy_from_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2\n"
- " clrw (%0)+",
- /* copy */
- "2: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- ".section __ex_table,\"a\"\n"
- " .long 2b,1b\n"
- ".previous");
- break;
- case 3:
- __copy_from_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2\n"
- " clrw (%0)+\n"
- "2: addql #1,%2\n"
- " clrb (%0)+",
- /* copy */
- "3: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- "4: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- ".section __ex_table,\"a\"\n"
- " .long 3b,1b\n"
- " .long 4b,2b\n"
- ".previous");
- break;
+ __get_user_asm(res, *(u16 *)to, (u16 *)from, u16, w, d, 2);
+ return res;
+ case 4:
+ __get_user_asm(res, *(u32 *)to, (u32 *)from, u32, l, r, 4);
+ return res;
}
- break;
- }
- return n;
-}
-#define __copy_to_user_big(to, from, n, fixup, copy) \
- __asm__ __volatile__ \
- ("10: movel (%1)+,%%d0\n" \
- "31: movesl %%d0,(%0)+\n" \
- "11: subql #1,%2\n" \
- " jne 10b\n" \
- "41:\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "22: addql #1,%2\n" \
- "12: lsll #2,%2\n" \
- fixup "\n" \
- " jra 13f\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 10b,22b\n" \
- " .long 31b,12b\n" \
- " .long 11b,12b\n" \
- " .long 41b,22b\n" \
- ".previous\n" \
- copy "\n" \
- "13:" \
- : "=a"(to), "=a"(from), "=d"(n) \
- : "0"(to), "1"(from), "2"(n/4) \
- : "d0", "memory")
+ asm volatile ("\n"
+ " .ifndef .Lfrom_user\n"
+ " .set .Lfrom_user,1\n"
+ " .macro copy_from_user to,from,tmp\n"
+ " .if .Lcnt >= 4\n"
+ "1: moves.l (\\from)+,\\tmp\n"
+ " move.l \\tmp,(\\to)+\n"
+ " .set .Lcnt,.Lcnt-4\n"
+ " .elseif .Lcnt & 2\n"
+ "1: moves.w (\\from)+,\\tmp\n"
+ " move.w \\tmp,(\\to)+\n"
+ " .set .Lcnt,.Lcnt-2\n"
+ " .elseif .Lcnt & 1\n"
+ "1: moves.b (\\from)+,\\tmp\n"
+ " move.b \\tmp,(\\to)+\n"
+ " .set .Lcnt,.Lcnt-1\n"
+ " .else\n"
+ " .exitm\n"
+ " .endif\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3f\n"
+ " .previous\n"
+ " .endm\n"
+ " .endif\n"
+ "\n"
+ " .set .Lcnt,%c4\n"
+ " copy_from_user %1,%2,%3\n"
+ " copy_from_user %1,%2,%3\n"
+ " copy_from_user %1,%2,%3\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "3: moveq.l %4,%0\n"
+ " move.l %5,%1\n"
+ " .rept %c4 / 4\n"
+ " clr.l (%1)+\n"
+ " .endr\n"
+ " .if %c4 & 2\n"
+ " clr.w (%1)+\n"
+ " .endif\n"
+ " .if %c4 & 1\n"
+ " clr.b (%1)+\n"
+ " .endif\n"
+ " jra 2b\n"
+ " .previous\n"
+ : "+r" (res), "+a" (to), "+a" (from), "=&d" (tmp)
+ : "i" (n), "g" (to)
+ : "memory");
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+ return res;
+}
-static inline unsigned long
+static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- switch (n) {
- case 0:
- break;
- case 1:
- __asm__ __volatile__
- (" moveb (%1)+,%%d0\n"
- "21:movesb %%d0,(%0)+\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "2: addql #1,%2\n"
- " jra 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n "
- " .long 21b,2b\n"
- " .long 1b,2b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 2:
- __asm__ __volatile__
- (" movew (%1)+,%%d0\n"
- "21:movesw %%d0,(%0)+\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "2: addql #2,%2\n"
- " jra 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,2b\n"
- " .long 1b,2b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 3:
- __asm__ __volatile__
- (" movew (%1)+,%%d0\n"
- "21:movesw %%d0,(%0)+\n"
- "1: moveb (%1)+,%%d0\n"
- "22:movesb %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #2,%2\n"
- "4: addql #1,%2\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,3b\n"
- " .long 1b,3b\n"
- " .long 22b,4b\n"
- " .long 2b,4b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 4:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "2: addql #4,%2\n"
- " jra 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,2b\n"
- " .long 1b,2b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 8:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1: movel (%1)+,%%d0\n"
- "22:movesl %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #4,%2\n"
- "4: addql #4,%2\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,3b\n"
- " .long 1b,3b\n"
- " .long 22b,4b\n"
- " .long 2b,4b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 12:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1: movel (%1)+,%%d0\n"
- "22:movesl %%d0,(%0)+\n"
- "2: movel (%1)+,%%d0\n"
- "23:movesl %%d0,(%0)+\n"
- "3:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: addql #4,%2\n"
- "5: addql #4,%2\n"
- "6: addql #4,%2\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,4b\n"
- " .long 1b,4b\n"
- " .long 22b,5b\n"
- " .long 2b,5b\n"
- " .long 23b,6b\n"
- " .long 3b,6b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 16:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1: movel (%1)+,%%d0\n"
- "22:movesl %%d0,(%0)+\n"
- "2: movel (%1)+,%%d0\n"
- "23:movesl %%d0,(%0)+\n"
- "3: movel (%1)+,%%d0\n"
- "24:movesl %%d0,(%0)+\n"
- "4:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "5: addql #4,%2\n"
- "6: addql #4,%2\n"
- "7: addql #4,%2\n"
- "8: addql #4,%2\n"
- " jra 4b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,5b\n"
- " .long 1b,5b\n"
- " .long 22b,6b\n"
- " .long 2b,6b\n"
- " .long 23b,7b\n"
- " .long 3b,7b\n"
- " .long 24b,8b\n"
- " .long 4b,8b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- default:
- switch (n & 3) {
- case 0:
- __copy_to_user_big(to, from, n, "", "");
- break;
+ unsigned long res = 0, tmp;
+
+ /* limit the inlined version to 3 moves */
+ if (n == 11 || n > 12)
+ return __generic_copy_to_user(to, from, n);
+
+ switch (n) {
case 1:
- __copy_to_user_big(to, from, n,
- /* fixup */
- "1: addql #1,%2",
- /* copy */
- " moveb (%1)+,%%d0\n"
- "22:movesb %%d0,(%0)+\n"
- "2:"
- ".section __ex_table,\"a\"\n"
- " .long 22b,1b\n"
- " .long 2b,1b\n"
- ".previous");
- break;
+ __put_user_asm(res, *(u8 *)from, (u8 *)to, b, d, 1);
+ return res;
case 2:
- __copy_to_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2",
- /* copy */
- " movew (%1)+,%%d0\n"
- "22:movesw %%d0,(%0)+\n"
- "2:"
- ".section __ex_table,\"a\"\n"
- " .long 22b,1b\n"
- " .long 2b,1b\n"
- ".previous");
- break;
- case 3:
- __copy_to_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2\n"
- "2: addql #1,%2",
- /* copy */
- " movew (%1)+,%%d0\n"
- "23:movesw %%d0,(%0)+\n"
- "3: moveb (%1)+,%%d0\n"
- "24:movesb %%d0,(%0)+\n"
- "4:"
- ".section __ex_table,\"a\"\n"
- " .long 23b,1b\n"
- " .long 3b,1b\n"
- " .long 24b,2b\n"
- " .long 4b,2b\n"
- ".previous");
- break;
+ __put_user_asm(res, *(u16 *)from, (u16 *)to, w, d, 2);
+ return res;
+ case 4:
+ __put_user_asm(res, *(u32 *)from, (u32 *)to, l, r, 4);
+ return res;
}
- break;
- }
- return n;
+
+ asm volatile ("\n"
+ " .ifndef .Lto_user\n"
+ " .set .Lto_user,1\n"
+ " .macro copy_to_user to,from,tmp\n"
+ " .if .Lcnt >= 4\n"
+ " move.l (\\from)+,\\tmp\n"
+ "11: moves.l \\tmp,(\\to)+\n"
+ "12: .set .Lcnt,.Lcnt-4\n"
+ " .elseif .Lcnt & 2\n"
+ " move.w (\\from)+,\\tmp\n"
+ "11: moves.w \\tmp,(\\to)+\n"
+ "12: .set .Lcnt,.Lcnt-2\n"
+ " .elseif .Lcnt & 1\n"
+ " move.b (\\from)+,\\tmp\n"
+ "11: moves.b \\tmp,(\\to)+\n"
+ "12: .set .Lcnt,.Lcnt-1\n"
+ " .else\n"
+ " .exitm\n"
+ " .endif\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 11b,3f\n"
+ " .long 12b,3f\n"
+ " .previous\n"
+ " .endm\n"
+ " .endif\n"
+ "\n"
+ " .set .Lcnt,%c4\n"
+ " copy_to_user %1,%2,%3\n"
+ " copy_to_user %1,%2,%3\n"
+ " copy_to_user %1,%2,%3\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "3: moveq.l %4,%0\n"
+ " jra 2b\n"
+ " .previous\n"
+ : "+r" (res), "+a" (to), "+a" (from), "=&d" (tmp)
+ : "i" (n)
+ : "memory");
+
+ return res;
}
-#define copy_from_user(to, from, n) \
+#define __copy_from_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user(to, from, n) : \
__generic_copy_from_user(to, from, n))
-#define copy_to_user(to, from, n) \
+#define __copy_to_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_to_user(to, from, n) : \
__generic_copy_to_user(to, from, n))
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
-/*
- * Copy a null terminated string from userspace.
- */
+#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
+#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
-static inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res;
- if (count == 0) return count;
- __asm__ __volatile__
- ("1: movesb (%2)+,%%d0\n"
- "12:moveb %%d0,(%1)+\n"
- " jeq 2f\n"
- " subql #1,%3\n"
- " jne 1b\n"
- "2: subl %3,%0\n"
- "3:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: movel %4,%0\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,4b\n"
- " .long 12b,4b\n"
- ".previous"
- : "=d"(res), "=a"(dst), "=a"(src), "=d"(count)
- : "i"(-EFAULT), "0"(count), "1"(dst), "2"(src), "3"(count)
- : "d0", "memory");
- return res;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-static inline long strnlen_user(const char __user *src, long n)
-{
- long res;
-
- res = -(unsigned long)src;
- __asm__ __volatile__
- ("1:\n"
- " tstl %2\n"
- " jeq 3f\n"
- "2: movesb (%1)+,%%d0\n"
- "22:\n"
- " subql #1,%2\n"
- " tstb %%d0\n"
- " jne 1b\n"
- " jra 4f\n"
- "3:\n"
- " addql #1,%0\n"
- "4:\n"
- " addl %1,%0\n"
- "5:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "6: moveq %3,%0\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 2b,6b\n"
- " .long 22b,6b\n"
- ".previous"
- : "=d"(res), "=a"(src), "=d"(n)
- : "i"(0), "0"(res), "1"(src), "2"(n)
- : "d0");
- return res;
-}
+long strncpy_from_user(char *dst, const char __user *src, long count);
+long strnlen_user(const char __user *src, long n);
+unsigned long clear_user(void __user *to, unsigned long n);
#define strlen_user(str) strnlen_user(str, 32767)
-/*
- * Zero Userspace
- */
-
-static inline unsigned long
-clear_user(void __user *to, unsigned long n)
-{
- __asm__ __volatile__
- (" tstl %1\n"
- " jeq 3f\n"
- "1: movesl %3,(%0)+\n"
- "2: subql #1,%1\n"
- " jne 1b\n"
- "3: movel %2,%1\n"
- " bclr #1,%1\n"
- " jeq 4f\n"
- "24:movesw %3,(%0)+\n"
- "4: bclr #0,%1\n"
- " jeq 5f\n"
- "25:movesb %3,(%0)+\n"
- "5:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "61:addql #1,%1\n"
- "6: lsll #2,%1\n"
- " addl %2,%1\n"
- " jra 5b\n"
- "7: addql #2,%1\n"
- " jra 5b\n"
- "8: addql #1,%1\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,61b\n"
- " .long 2b,6b\n"
- " .long 3b,61b\n"
- " .long 24b,7b\n"
- " .long 4b,7b\n"
- " .long 25b,8b\n"
- " .long 5b,8b\n"
- ".previous"
- : "=a"(to), "=d"(n)
- : "r"(n & 3), "r"(0), "0"(to), "1"(n/4));
- return n;
-}
-
#endif /* _M68K_UACCESS_H */
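
The rewritten m68k __get_user()/__put_user() above dispatch on sizeof(*(ptr)) to pick a fixed-size moves instruction and its exception-table entry. A user-space sketch of that dispatch idea, with plain loads standing in for the inline asm and all names invented for the example:

#include <stdint.h>
#include <stdio.h>

/* GNU statement expression, as in the kernel macro */
#define toy_get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	switch (sizeof(*(ptr))) {					\
	case 1: (x) = *(const uint8_t  *)(ptr); break;			\
	case 2: (x) = *(const uint16_t *)(ptr); break;			\
	case 4: (x) = *(const uint32_t *)(ptr); break;			\
	default: __gu_err = -1; break;	/* would be __get_user_bad() */	\
	}								\
	__gu_err;							\
})

int main(void)
{
	uint16_t src = 0xbeef;
	unsigned int val = 0;

	if (toy_get_user(val, &src) == 0)
		printf("read %#x via the 2-byte path\n", val);
	return 0;
}
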
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index f236fe92156f..7c0b6296b45c 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -410,46 +410,7 @@ __syscall_return(type,__res); \
#ifdef __KERNEL_SYSCALLS__
-#include <linux/compiler.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-/*
- * we need this inline - forking from kernel space will result
- * in NO COPY ON WRITE (!!!), until an execve is executed. This
- * is no problem, but for the stack. This is handled by not letting
- * main() use the stack at all after fork(). Thus, no function
- * calls - which means inline code for fork too, as otherwise we
- * would use the stack upon exit from 'fork()'.
- *
- * Actually only pause and fork are needed inline, so that there
- * won't be any messing with the stack from main(), but we define
- * some others too.
- */
-#define __NR__exit __NR_exit
-static inline _syscall0(pid_t,setsid)
-static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
-static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
-static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
-static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
-static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
-static inline _syscall1(int,close,int,fd)
-static inline _syscall1(int,_exit,int,exitcode)
-static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
-
-asmlinkage long sys_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-asmlinkage int sys_execve(char *name, char **argv, char **envp);
-asmlinkage int sys_pipe(unsigned long *fildes);
-struct pt_regs;
-struct sigaction;
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize);
#endif /* __KERNEL_SYSCALLS__ */
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index 2b408842a30e..c5247516fcfe 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -87,8 +87,4 @@ extern void (*mach_disable_irq)(unsigned int);
#define disable_irq(x) do { } while (0)
#define disable_irq_nosync(x) disable_irq(x)
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h
index f65bd90749e6..1e19c457de7d 100644
--- a/include/asm-m68knommu/ptrace.h
+++ b/include/asm-m68knommu/ptrace.h
@@ -70,7 +70,7 @@ struct switch_stack {
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
-#ifdef COFNIG_FPU
+#ifdef CONFIG_FPU
#define PTRACE_GETFPREGS 14
#define PTRACE_SETFPREGS 15
#endif
diff --git a/include/asm-mips/mach-au1x00/au1xxx_psc.h b/include/asm-mips/mach-au1x00/au1xxx_psc.h
index 5c3e2a38ce12..d7cbacdd21fe 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_psc.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_psc.h
@@ -39,7 +39,12 @@
#define PSC0_BASE_ADDR 0xb1a00000
#define PSC1_BASE_ADDR 0xb1b00000
#define PSC2_BASE_ADDR 0xb0a00000
-#define PSC3_BASE_ADDR 0xb0d00000
+#define PSC3_BASE_ADDR 0xb0b00000
+#endif
+
+#ifdef CONFIG_SOC_AU1200
+#define PSC0_BASE_ADDR 0xb1a00000
+#define PSC1_BASE_ADDR 0xb1b00000
#endif
/* The PSC select and control registers are common to
@@ -227,6 +232,8 @@ typedef struct psc_i2s {
#define PSC_I2SCFG_DD_DISABLE (1 << 27)
#define PSC_I2SCFG_DE_ENABLE (1 << 26)
#define PSC_I2SCFG_SET_WS(x) (((((x) / 2) - 1) & 0x7f) << 16)
+#define PSC_I2SCFG_WS(n) ((n & 0xFF) << 16)
+#define PSC_I2SCFG_WS_MASK (PSC_I2SCFG_WS(0x3F))
#define PSC_I2SCFG_WI (1 << 15)
#define PSC_I2SCFG_DIV_MASK (3 << 13)
diff --git a/include/asm-mips/mach-db1x00/db1x00.h b/include/asm-mips/mach-db1x00/db1x00.h
index 8fbb4b42a8b5..0f5f4c29f4e8 100644
--- a/include/asm-mips/mach-db1x00/db1x00.h
+++ b/include/asm-mips/mach-db1x00/db1x00.h
@@ -30,8 +30,20 @@
#ifdef CONFIG_MIPS_DB1550
+
+#define DBDMA_AC97_TX_CHAN DSCR_CMD0_PSC1_TX
+#define DBDMA_AC97_RX_CHAN DSCR_CMD0_PSC1_RX
+#define DBDMA_I2S_TX_CHAN DSCR_CMD0_PSC3_TX
+#define DBDMA_I2S_RX_CHAN DSCR_CMD0_PSC3_RX
+
+#define SPI_PSC_BASE PSC0_BASE_ADDR
+#define AC97_PSC_BASE PSC1_BASE_ADDR
+#define SMBUS_PSC_BASE PSC2_BASE_ADDR
+#define I2S_PSC_BASE PSC3_BASE_ADDR
+
#define BCSR_KSEG1_ADDR 0xAF000000
#define NAND_PHYS_ADDR 0x20000000
+
#else
#define BCSR_KSEG1_ADDR 0xAE000000
#endif
diff --git a/include/asm-mips/mmzone.h b/include/asm-mips/mmzone.h
index dc231c89bef9..f53ec54c92ff 100644
--- a/include/asm-mips/mmzone.h
+++ b/include/asm-mips/mmzone.h
@@ -10,7 +10,6 @@
#ifdef CONFIG_DISCONTIGMEM
-#define kvaddr_to_nid(kvaddr) pa_to_nid(__pa(kvaddr))
#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
#endif /* CONFIG_DISCONTIGMEM */
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index ceb9b73199d1..c87813662d4d 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -14,11 +14,6 @@ extern struct node_map_data node_data[];
#define NODE_DATA(nid) (&node_data[nid].pg_data)
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) \
({ \
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index d1c2a4405660..76e2f08c3c83 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -288,8 +288,8 @@ static __inline__ int test_le_bit(unsigned long nr,
#define __test_and_clear_le_bit(nr, addr) \
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0)
-unsigned long find_next_zero_le_bit(const unsigned long *addr,
+#define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0)
+unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
/* Bitmap functions for the ext2 filesystem */
@@ -309,7 +309,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr,
#define ext2_find_first_zero_bit(addr, size) \
find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_le_bit((unsigned long*)addr, size, off)
+ generic_find_next_zero_le_bit((unsigned long*)addr, size, off)
/* Bitmap functions for the minix filesystem. */
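
The bitops.h hunk renames find_next_zero_le_bit() to generic_find_next_zero_le_bit(); the *_le_bit helpers it feeds operate on ext2-style little-endian bitmaps, where bit n lives in byte n/8 at position n%8 regardless of host byte order. A minimal sketch of that numbering; test_le_bit() here is a plain C stand-in, not the kernel helper.

#include <stdio.h>

static int test_le_bit(unsigned long nr, const unsigned char *addr)
{
	return (addr[nr >> 3] >> (nr & 7)) & 1;
}

int main(void)
{
	unsigned char bitmap[4] = { 0x01, 0x00, 0x80, 0x00 };

	/* bit 0 is the low bit of byte 0, bit 23 the high bit of byte 2 */
	printf("bit 0 = %d, bit 23 = %d\n",
	       test_le_bit(0, bitmap), test_le_bit(23, bitmap));
	return 0;
}
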
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index f6265c2a0dd2..fab41c280aa1 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -24,6 +24,9 @@
#define PPC_FEATURE_ICACHE_SNOOP 0x00002000
#define PPC_FEATURE_ARCH_2_05 0x00001000
+#define PPC_FEATURE_TRUE_LE 0x00000002
+#define PPC_FEATURE_PPC_LE 0x00000001
+
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -69,6 +72,13 @@ struct cpu_spec {
/* Processor specific oprofile operations */
enum powerpc_oprofile_type oprofile_type;
+ /* Bit locations inside the mmcra change */
+ unsigned long oprofile_mmcra_sihv;
+ unsigned long oprofile_mmcra_sipr;
+
+ /* Bits to clear during an oprofile exception */
+ unsigned long oprofile_mmcra_clear;
+
/* Name of processor class, for the ELF AT_PLATFORM entry */
char *platform;
};
@@ -104,6 +114,8 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
+#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
+#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
#ifdef __powerpc64__
/* Add the 64b processor unique features in the top half of the word */
@@ -117,7 +129,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
-#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0000100000000000)
#define CPU_FTR_PAUSE_ZERO ASM_CONST(0x0000200000000000)
#define CPU_FTR_PURR ASM_CONST(0x0000400000000000)
@@ -134,7 +145,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_SMT ASM_CONST(0x0)
#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0)
-#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0)
#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0)
#define CPU_FTR_PURR ASM_CONST(0x0)
#endif
@@ -192,92 +202,95 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE)
#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_MAYBE_CAN_NAP)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
- CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE)
+ CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_PPC_LE)
#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP)
+ CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_740 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP)
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_PPC_LE)
#define CPU_FTRS_750 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP)
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_PPC_LE)
#define CPU_FTRS_750FX1 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
- CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
+ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM | CPU_FTR_PPC_LE)
#define CPU_FTRS_750FX2 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
- CPU_FTR_NO_DPM)
+ CPU_FTR_NO_DPM | CPU_FTR_PPC_LE)
#define CPU_FTRS_750FX (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
- CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS)
+ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
#define CPU_FTRS_750GX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | \
CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
- CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS)
+ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
- CPU_FTR_MAYBE_CAN_NAP)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7400 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
- CPU_FTR_MAYBE_CAN_NAP)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NEED_COHERENT)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
- CPU_FTR_NEED_COHERENT)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT)
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
- CPU_FTR_NEED_COHERENT)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
- CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
- CPU_FTR_NEED_COHERENT)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
- CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE)
#define CPU_FTRS_7447 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
- CPU_FTR_NEED_COHERENT)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7447A (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
- CPU_FTR_NEED_COHERENT)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
#define CPU_FTRS_G2_LE (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
@@ -287,13 +300,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
CPU_FTR_COMMON)
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
-#define CPU_FTRS_POWER3_32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
-#define CPU_FTRS_POWER4_32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_970_32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN)
#define CPU_FTRS_8XX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB)
#define CPU_FTRS_40X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_NODSISRALIGN)
@@ -307,7 +313,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
#ifdef __powerpc64__
#define CPU_FTRS_POWER3 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_IABR)
+ CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
#define CPU_FTRS_RS64 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
CPU_FTR_MMCRA | CPU_FTR_CTRL)
@@ -320,12 +326,12 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
- CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR)
+ CPU_FTR_PURR)
#define CPU_FTRS_POWER6 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
- CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE)
+ CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_REAL_LE)
#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -354,12 +360,6 @@ enum {
#else
CPU_FTRS_GENERIC_32 |
#endif
-#ifdef CONFIG_PPC64BRIDGE
- CPU_FTRS_POWER3_32 |
-#endif
-#ifdef CONFIG_POWER4
- CPU_FTRS_POWER4_32 | CPU_FTRS_970_32 |
-#endif
#ifdef CONFIG_8xx
CPU_FTRS_8XX |
#endif
@@ -399,12 +399,6 @@ enum {
#else
CPU_FTRS_GENERIC_32 &
#endif
-#ifdef CONFIG_PPC64BRIDGE
- CPU_FTRS_POWER3_32 &
-#endif
-#ifdef CONFIG_POWER4
- CPU_FTRS_POWER4_32 & CPU_FTRS_970_32 &
-#endif
#ifdef CONFIG_8xx
CPU_FTRS_8XX &
#endif
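
The cputable.h hunks add CPU_FTR_PPC_LE/CPU_FTR_REAL_LE to the per-CPU feature masks that the CPU_FTRS_POSSIBLE/CPU_FTRS_ALWAYS enums OR and AND together. A hedged, stand-alone sketch of why that composition pays off: many feature tests can then be folded at compile time. The masks and helper below are invented for the example.

#include <stdio.h>

#define FTR_USE_TB	0x1UL
#define FTR_PPC_LE	0x2UL
#define FTR_ALTIVEC	0x4UL

#define CPU_FTRS_A	(FTR_USE_TB | FTR_PPC_LE)
#define CPU_FTRS_B	(FTR_USE_TB | FTR_ALTIVEC)

#define CPU_FTRS_POSSIBLE	(CPU_FTRS_A | CPU_FTRS_B)
#define CPU_FTRS_ALWAYS		(CPU_FTRS_A & CPU_FTRS_B)

static int cpu_has_feature(unsigned long cur_features, unsigned long f)
{
	if (!(CPU_FTRS_POSSIBLE & f))
		return 0;			/* can fold to constant 0 */
	if ((CPU_FTRS_ALWAYS & f) == f)
		return 1;			/* can fold to constant 1 */
	return (cur_features & f) != 0;		/* genuinely run-time */
}

int main(void)
{
	unsigned long cur = CPU_FTRS_A;		/* the CPU we booted on */

	printf("use_tb=%d altivec=%d\n",
	       cpu_has_feature(cur, FTR_USE_TB),
	       cpu_has_feature(cur, FTR_ALTIVEC));
	return 0;
}
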
diff --git a/include/asm-powerpc/delay.h b/include/asm-powerpc/delay.h
index 057a60955474..f9200a65c632 100644
--- a/include/asm-powerpc/delay.h
+++ b/include/asm-powerpc/delay.h
@@ -17,5 +17,18 @@
extern void __delay(unsigned long loops);
extern void udelay(unsigned long usecs);
+/*
+ * On shared processor machines the generic implementation of mdelay can
+ * result in large errors. While each iteration of the loop inside mdelay
+ * is supposed to take 1ms, the hypervisor could sleep our partition for
+ * longer (eg 10ms). With the right timing these errors can add up.
+ *
+ * Since there is no 32bit overflow issue on 64bit kernels, just call
+ * udelay directly.
+ */
+#ifdef CONFIG_PPC64
+#define mdelay(n) udelay((n) * 1000)
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DELAY_H */
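
The comment added to delay.h above argues that a looping mdelay() accumulates hypervisor-preemption error while a single udelay() call bounds it. A small worked example of that arithmetic; the 10ms preemption figure comes from the comment, the rest is illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int n = 100;		/* mdelay(100) */
	unsigned int preempt_ms = 10;	/* partition ceded for 10ms at a time */

	/* generic mdelay: n one-millisecond loops, worst case each one
	 * absorbs a full preemption */
	unsigned int loop_worst = n * (1 + preempt_ms);

	/* single udelay(n * 1000): at most one preemption on top */
	unsigned int single_worst = n + preempt_ms;

	printf("requested %u ms, looped worst case %u ms, single call %u ms\n",
	       n, loop_worst, single_worst);
	return 0;
}
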
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index e9c86b1eedab..4df3e80118f4 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -292,8 +292,6 @@ static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
static inline u8 eeh_inb(unsigned long port)
{
u8 val;
- if (!_IO_IS_VALID(port))
- return ~0;
val = in_8((u8 __iomem *)(port+pci_io_base));
if (EEH_POSSIBLE_ERROR(val, u8))
return eeh_check_failure((void __iomem *)(port), val);
@@ -302,15 +300,12 @@ static inline u8 eeh_inb(unsigned long port)
static inline void eeh_outb(u8 val, unsigned long port)
{
- if (_IO_IS_VALID(port))
- out_8((u8 __iomem *)(port+pci_io_base), val);
+ out_8((u8 __iomem *)(port+pci_io_base), val);
}
static inline u16 eeh_inw(unsigned long port)
{
u16 val;
- if (!_IO_IS_VALID(port))
- return ~0;
val = in_le16((u16 __iomem *)(port+pci_io_base));
if (EEH_POSSIBLE_ERROR(val, u16))
return eeh_check_failure((void __iomem *)(port), val);
@@ -319,15 +314,12 @@ static inline u16 eeh_inw(unsigned long port)
static inline void eeh_outw(u16 val, unsigned long port)
{
- if (_IO_IS_VALID(port))
- out_le16((u16 __iomem *)(port+pci_io_base), val);
+ out_le16((u16 __iomem *)(port+pci_io_base), val);
}
static inline u32 eeh_inl(unsigned long port)
{
u32 val;
- if (!_IO_IS_VALID(port))
- return ~0;
val = in_le32((u32 __iomem *)(port+pci_io_base));
if (EEH_POSSIBLE_ERROR(val, u32))
return eeh_check_failure((void __iomem *)(port), val);
@@ -336,8 +328,7 @@ static inline u32 eeh_inl(unsigned long port)
static inline void eeh_outl(u32 val, unsigned long port)
{
- if (_IO_IS_VALID(port))
- out_le32((u32 __iomem *)(port+pci_io_base), val);
+ out_le32((u32 __iomem *)(port+pci_io_base), val);
}
/* in-string eeh macros */
diff --git a/include/asm-powerpc/eeh_event.h b/include/asm-powerpc/eeh_event.h
index 93d55a2bebfd..dc6bf0ffb796 100644
--- a/include/asm-powerpc/eeh_event.h
+++ b/include/asm-powerpc/eeh_event.h
@@ -18,8 +18,8 @@
* Copyright (c) 2005 Linas Vepstas <linas@linas.org>
*/
-#ifndef ASM_PPC64_EEH_EVENT_H
-#define ASM_PPC64_EEH_EVENT_H
+#ifndef ASM_POWERPC_EEH_EVENT_H
+#define ASM_POWERPC_EEH_EVENT_H
#ifdef __KERNEL__
/** EEH event -- structure holding pci controller data that describes
@@ -39,7 +39,7 @@ struct eeh_event {
* @dev pci device
*
* This routine builds a PCI error event which will be delivered
- * to all listeners on the peh_notifier_chain.
+ * to all listeners on the eeh_notifier_chain.
*
* This routine can be called within an interrupt context;
* the actual event will be delivered in a normal context
@@ -51,7 +51,7 @@ int eeh_send_failure_event (struct device_node *dn,
int time_unavail);
/* Main recovery function */
-void handle_eeh_events (struct eeh_event *);
+struct pci_dn * handle_eeh_events (struct eeh_event *);
#endif /* __KERNEL__ */
-#endif /* ASM_PPC64_EEH_EVENT_H */
+#endif /* ASM_POWERPC_EEH_EVENT_H */
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 99c18b71aa82..9a83a987d396 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -293,7 +293,7 @@ do { \
NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
- VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base) \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \
} while (0)
/* PowerPC64 relocations defined by the ABIs */
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index 6cc7e1fb7bfd..0d3c4e85711a 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -102,6 +102,15 @@
#define H_PP1 (1UL<<(63-62))
#define H_PP2 (1UL<<(63-63))
+/* VASI States */
+#define H_VASI_INVALID 0
+#define H_VASI_ENABLED 1
+#define H_VASI_ABORTED 2
+#define H_VASI_SUSPENDING 3
+#define H_VASI_SUSPENDED 4
+#define H_VASI_RESUMED 5
+#define H_VASI_COMPLETED 6
+
/* DABRX flags */
#define H_DABRX_HYPERVISOR (1UL<<(63-61))
#define H_DABRX_KERNEL (1UL<<(63-62))
@@ -190,6 +199,7 @@
#define H_QUERY_INT_STATE 0x1E4
#define H_POLL_PENDING 0x1D8
#define H_JOIN 0x298
+#define H_VASI_STATE 0x2A4
#define H_ENABLE_CRQ 0x2B0
#ifndef __ASSEMBLY__
diff --git a/include/asm-powerpc/immap_86xx.h b/include/asm-powerpc/immap_86xx.h
new file mode 100644
index 000000000000..d905b6622268
--- /dev/null
+++ b/include/asm-powerpc/immap_86xx.h
@@ -0,0 +1,199 @@
+/*
+ * MPC86xx Internal Memory Map
+ *
+ * Author: Jeff Brown
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __ASM_POWERPC_IMMAP_86XX_H__
+#define __ASM_POWERPC_IMMAP_86XX_H__
+#ifdef __KERNEL__
+
+/* Eventually this should define all the IO block registers in 86xx */
+
+/* PCI Registers */
+typedef struct ccsr_pci {
+ uint cfg_addr; /* 0x.000 - PCI Configuration Address Register */
+ uint cfg_data; /* 0x.004 - PCI Configuration Data Register */
+ uint int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */
+ char res1[3060];
+ uint potar0; /* 0x.c00 - PCI Outbound Transaction Address Register 0 */
+ uint potear0; /* 0x.c04 - PCI Outbound Translation Extended Address Register 0 */
+ uint powbar0; /* 0x.c08 - PCI Outbound Window Base Address Register 0 */
+ char res2[4];
+ uint powar0; /* 0x.c10 - PCI Outbound Window Attributes Register 0 */
+ char res3[12];
+ uint potar1; /* 0x.c20 - PCI Outbound Transaction Address Register 1 */
+ uint potear1; /* 0x.c24 - PCI Outbound Translation Extended Address Register 1 */
+ uint powbar1; /* 0x.c28 - PCI Outbound Window Base Address Register 1 */
+ char res4[4];
+ uint powar1; /* 0x.c30 - PCI Outbound Window Attributes Register 1 */
+ char res5[12];
+ uint potar2; /* 0x.c40 - PCI Outbound Transaction Address Register 2 */
+ uint potear2; /* 0x.c44 - PCI Outbound Translation Extended Address Register 2 */
+ uint powbar2; /* 0x.c48 - PCI Outbound Window Base Address Register 2 */
+ char res6[4];
+ uint powar2; /* 0x.c50 - PCI Outbound Window Attributes Register 2 */
+ char res7[12];
+ uint potar3; /* 0x.c60 - PCI Outbound Transaction Address Register 3 */
+ uint potear3; /* 0x.c64 - PCI Outbound Translation Extended Address Register 3 */
+ uint powbar3; /* 0x.c68 - PCI Outbound Window Base Address Register 3 */
+ char res8[4];
+ uint powar3; /* 0x.c70 - PCI Outbound Window Attributes Register 3 */
+ char res9[12];
+ uint potar4; /* 0x.c80 - PCI Outbound Transaction Address Register 4 */
+ uint potear4; /* 0x.c84 - PCI Outbound Translation Extended Address Register 4 */
+ uint powbar4; /* 0x.c88 - PCI Outbound Window Base Address Register 4 */
+ char res10[4];
+ uint powar4; /* 0x.c90 - PCI Outbound Window Attributes Register 4 */
+ char res11[268];
+ uint pitar3; /* 0x.da0 - PCI Inbound Translation Address Register 3 */
+ char res12[4];
+ uint piwbar3; /* 0x.da8 - PCI Inbound Window Base Address Register 3 */
+ uint piwbear3; /* 0x.dac - PCI Inbound Window Base Extended Address Register 3 */
+ uint piwar3; /* 0x.db0 - PCI Inbound Window Attributes Register 3 */
+ char res13[12];
+ uint pitar2; /* 0x.dc0 - PCI Inbound Translation Address Register 2 */
+ char res14[4];
+ uint piwbar2; /* 0x.dc8 - PCI Inbound Window Base Address Register 2 */
+ uint piwbear2; /* 0x.dcc - PCI Inbound Window Base Extended Address Register 2 */
+ uint piwar2; /* 0x.dd0 - PCI Inbound Window Attributes Register 2 */
+ char res15[12];
+ uint pitar1; /* 0x.de0 - PCI Inbound Translation Address Register 1 */
+ char res16[4];
+ uint piwbar1; /* 0x.de8 - PCI Inbound Window Base Address Register 1 */
+ char res17[4];
+ uint piwar1; /* 0x.df0 - PCI Inbound Window Attributes Register 1 */
+ char res18[12];
+ uint err_dr; /* 0x.e00 - PCI Error Detect Register */
+ uint err_cap_dr; /* 0x.e04 - PCI Error Capture Disable Register */
+ uint err_en; /* 0x.e08 - PCI Error Enable Register */
+ uint err_attrib; /* 0x.e0c - PCI Error Attributes Capture Register */
+ uint err_addr; /* 0x.e10 - PCI Error Address Capture Register */
+ uint err_ext_addr; /* 0x.e14 - PCI Error Extended Address Capture Register */
+ uint err_dl; /* 0x.e18 - PCI Error Data Low Capture Register */
+ uint err_dh; /* 0x.e1c - PCI Error Data High Capture Register */
+ uint gas_timr; /* 0x.e20 - PCI Gasket Timer Register */
+ uint pci_timr; /* 0x.e24 - PCI Timer Register */
+ char res19[472];
+} ccsr_pci_t;
+
+/* PCI Express Registers */
+typedef struct ccsr_pex {
+ uint pex_config_addr; /* 0x.000 - PCI Express Configuration Address Register */
+ uint pex_config_data; /* 0x.004 - PCI Express Configuration Data Register */
+ char res1[4];
+ uint pex_otb_cpl_tor; /* 0x.00c - PCI Express Outbound completion timeout register */
+ uint pex_conf_tor; /* 0x.010 - PCI Express configuration timeout register */
+ char res2[12];
+ uint pex_pme_mes_dr; /* 0x.020 - PCI Express PME and message detect register */
+ uint pex_pme_mes_disr; /* 0x.024 - PCI Express PME and message disable register */
+ uint pex_pme_mes_ier; /* 0x.028 - PCI Express PME and message interrupt enable register */
+ uint pex_pmcr; /* 0x.02c - PCI Express power management command register */
+ char res3[3024];
+ uint pexotar0; /* 0x.c00 - PCI Express outbound translation address register 0 */
+ uint pexotear0; /* 0x.c04 - PCI Express outbound translation extended address register 0*/
+ char res4[8];
+ uint pexowar0; /* 0x.c10 - PCI Express outbound window attributes register 0*/
+ char res5[12];
+ uint pexotar1; /* 0x.c20 - PCI Express outbound translation address register 1 */
+ uint pexotear1; /* 0x.c24 - PCI Express outbound translation extended address register 1*/
+ uint pexowbar1; /* 0x.c28 - PCI Express outbound window base address register 1*/
+ char res6[4];
+ uint pexowar1; /* 0x.c30 - PCI Express outbound window attributes register 1*/
+ char res7[12];
+ uint pexotar2; /* 0x.c40 - PCI Express outbound translation address register 2 */
+ uint pexotear2; /* 0x.c44 - PCI Express outbound translation extended address register 2*/
+ uint pexowbar2; /* 0x.c48 - PCI Express outbound window base address register 2*/
+ char res8[4];
+ uint pexowar2; /* 0x.c50 - PCI Express outbound window attributes register 2*/
+ char res9[12];
+ uint pexotar3; /* 0x.c60 - PCI Express outbound translation address register 3 */
+ uint pexotear3; /* 0x.c64 - PCI Express outbound translation extended address register 3*/
+ uint pexowbar3; /* 0x.c68 - PCI Express outbound window base address register 3*/
+ char res10[4];
+ uint pexowar3; /* 0x.c70 - PCI Express outbound window attributes register 3*/
+ char res11[12];
+ uint pexotar4; /* 0x.c80 - PCI Express outbound translation address register 4 */
+ uint pexotear4; /* 0x.c84 - PCI Express outbound translation extended address register 4*/
+ uint pexowbar4; /* 0x.c88 - PCI Express outbound window base address register 4*/
+ char res12[4];
+ uint pexowar4; /* 0x.c90 - PCI Express outbound window attributes register 4*/
+ char res13[12];
+ char res14[256];
+ uint pexitar3; /* 0x.da0 - PCI Express inbound translation address register 3 */
+ char res15[4];
+ uint pexiwbar3; /* 0x.da8 - PCI Express inbound window base address register 3 */
+ uint pexiwbear3; /* 0x.dac - PCI Express inbound window base extended address register 3 */
+ uint pexiwar3; /* 0x.db0 - PCI Express inbound window attributes register 3 */
+ char res16[12];
+ uint pexitar2; /* 0x.dc0 - PCI Express inbound translation address register 2 */
+ char res17[4];
+ uint pexiwbar2; /* 0x.dc8 - PCI Express inbound window base address register 2 */
+ uint pexiwbear2; /* 0x.dcc - PCI Express inbound window base extended address register 2 */
+ uint pexiwar2; /* 0x.dd0 - PCI Express inbound window attributes register 2 */
+ char res18[12];
+ uint pexitar1; /* 0x.de0 - PCI Express inbound translation address register 1 */
+ char res19[4];
+ uint pexiwbar1; /* 0x.de8 - PCI Express inbound window base address register 1 */
+ uint pexiwbear1; /* 0x.dec - PCI Express inbound window base extended address register 1 */
+ uint pexiwar1; /* 0x.df0 - PCI Express inbound window attributes register 1 */
+ char res20[12];
+ uint pex_err_dr; /* 0x.e00 - PCI Express error detect register */
+ char res21[4];
+ uint pex_err_en; /* 0x.e08 - PCI Express error interrupt enable register */
+ char res22[4];
+ uint pex_err_disr; /* 0x.e10 - PCI Express error disable register */
+ char res23[12];
+ uint pex_err_cap_stat; /* 0x.e20 - PCI Express error capture status register */
+ char res24[4];
+ uint pex_err_cap_r0; /* 0x.e28 - PCI Express error capture register 0 */
+ uint pex_err_cap_r1; /* 0x.e2c - PCI Express error capture register 1 */
+ uint pex_err_cap_r2; /* 0x.e30 - PCI Express error capture register 2 */
+ uint pex_err_cap_r3; /* 0x.e34 - PCI Express error capture register 3 */
+} ccsr_pex_t;
+
+/* Global Utility Registers */
+typedef struct ccsr_guts {
+ uint porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
+ uint porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
+ uint porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
+ uint pordevsr; /* 0x.000c - POR I/O Device Status Register */
+ uint pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
+ char res1[12];
+ uint gpporcr; /* 0x.0020 - General-Purpose POR Configuration Register */
+ char res2[12];
+ uint gpiocr; /* 0x.0030 - GPIO Control Register */
+ char res3[12];
+ uint gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
+ char res4[12];
+ uint gpindr; /* 0x.0050 - General-Purpose Input Data Register */
+ char res5[12];
+ uint pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
+ char res6[12];
+ uint devdisr; /* 0x.0070 - Device Disable Control */
+ char res7[12];
+ uint powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
+ char res8[12];
+ uint mcpsumr; /* 0x.0090 - Machine Check Summary Register */
+ char res9[12];
+ uint pvr; /* 0x.00a0 - Processor Version Register */
+ uint svr; /* 0x.00a4 - System Version Register */
+ char res10[3416];
+ uint clkocr; /* 0x.0e00 - Clock Out Select Register */
+ char res11[12];
+ uint ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
+ char res12[12];
+ uint lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
+ char res13[61916];
+} ccsr_guts_t;
+
+#endif /* __ASM_POWERPC_IMMAP_86XX_H__ */
+#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index f1c2469b8844..a9496f34b048 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -40,12 +40,6 @@ extern int check_legacy_ioport(unsigned long base_port);
extern unsigned long isa_io_base;
extern unsigned long pci_io_base;
-extern unsigned long io_page_mask;
-
-#define MAX_ISA_PORT 0x10000
-
-#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) \
- & io_page_mask)
#ifdef CONFIG_PPC_ISERIES
/* __raw_* accessors aren't supported on iSeries */
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index 2acf7b29ef06..a5e98641a2ae 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -66,7 +66,8 @@ extern void iommu_free_table(struct device_node *dn);
/* Initializes an iommu_table based in values set in the passed-in
* structure
*/
-extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
+extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
+ int nid);
extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems, unsigned long mask,
@@ -75,7 +76,8 @@ extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction);
extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag);
+ dma_addr_t *dma_handle, unsigned long mask,
+ gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
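
A sketch of how a caller might use the node-aware parameters added here (the function is hypothetical; iommu_init_table() and the pci_controller node field are the ones introduced in this diff):

#include <asm/iommu.h>
#include <asm/pci-bridge.h>

/* Initialise a PHB's iommu table near the PHB's NUMA node so the
 * table's working memory ends up node-local. */
static struct iommu_table *example_setup_phb_iommu(struct pci_controller *phb,
						   struct iommu_table *tbl)
{
	return iommu_init_table(tbl, phb->node);
}
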
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 1e9f25330307..a10feec29d4d 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -347,6 +347,92 @@ extern u64 ppc64_interrupt_controller;
#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET)
#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET)
+#elif defined(CONFIG_PPC_86xx)
+#include <asm/mpc86xx.h>
+
+#define NR_EPIC_INTS 48
+#ifndef NR_8259_INTS
+#define NR_8259_INTS 16 /* ULI 1575 can route 12 interrupts */
+#endif
+#define NUM_8259_INTERRUPTS NR_8259_INTS
+
+#ifndef I8259_OFFSET
+#define I8259_OFFSET 0
+#endif
+
+#define NR_IRQS 256
+
+/* Internal IRQs on MPC86xx OpenPIC */
+
+#ifndef MPC86xx_OPENPIC_IRQ_OFFSET
+#define MPC86xx_OPENPIC_IRQ_OFFSET NR_8259_INTS
+#endif
+
+/* The 48 internal sources */
+#define MPC86xx_IRQ_NULL ( 0 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_MCM ( 1 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_DDR ( 2 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_LBC ( 3 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_DMA0 ( 4 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_DMA1 ( 5 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_DMA2 ( 6 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_DMA3 ( 7 + MPC86xx_OPENPIC_IRQ_OFFSET)
+
+/* no 10,11 */
+#define MPC86xx_IRQ_UART2 (12 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC1_TX (13 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC1_RX (14 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC3_TX (15 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC3_RX (16 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC3_ERROR (17 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC1_ERROR (18 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC2_TX (19 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC2_RX (20 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC4_TX (21 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC4_RX (22 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC4_ERROR (23 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_TSEC2_ERROR (24 + MPC86xx_OPENPIC_IRQ_OFFSET)
+/* no 25 */
+#define MPC86xx_IRQ_UART1 (26 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_IIC (27 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_PERFMON (28 + MPC86xx_OPENPIC_IRQ_OFFSET)
+/* no 29,30,31 */
+#define MPC86xx_IRQ_SRIO_ERROR (32 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_SRIO_OUT_BELL (33 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_SRIO_IN_BELL (34 + MPC86xx_OPENPIC_IRQ_OFFSET)
+/* no 35,36 */
+#define MPC86xx_IRQ_SRIO_OUT_MSG1 (37 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_SRIO_IN_MSG1 (38 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_SRIO_OUT_MSG2 (39 + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_SRIO_IN_MSG2 (40 + MPC86xx_OPENPIC_IRQ_OFFSET)
+
+/* The 12 external interrupt lines */
+#define MPC86xx_IRQ_EXT_BASE 48
+#define MPC86xx_IRQ_EXT0 (0 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT1 (1 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT2 (2 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT3 (3 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT4 (4 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT5 (5 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT6 (6 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT7 (7 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT8 (8 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT9 (9 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT10 (10 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+#define MPC86xx_IRQ_EXT11 (11 + MPC86xx_IRQ_EXT_BASE \
+ + MPC86xx_OPENPIC_IRQ_OFFSET)
+
#else /* CONFIG_40x + CONFIG_8xx */
/*
* this is the # irq's for all ppc arch's (pmac/chrp/prep)
diff --git a/arch/powerpc/platforms/iseries/iommu.h b/include/asm-powerpc/iseries/iommu.h
index cb5658fbe657..0edbfe10cb37 100644
--- a/arch/powerpc/platforms/iseries/iommu.h
+++ b/include/asm-powerpc/iseries/iommu.h
@@ -1,5 +1,5 @@
-#ifndef _PLATFORMS_ISERIES_IOMMU_H
-#define _PLATFORMS_ISERIES_IOMMU_H
+#ifndef _ASM_POWERPC_ISERIES_IOMMU_H
+#define _ASM_POWERPC_ISERIES_IOMMU_H
/*
* Copyright (C) 2005 Stephen Rothwell, IBM Corporation
@@ -32,4 +32,4 @@ extern void iommu_table_getparms_iSeries(unsigned long busno,
unsigned char slotno, unsigned char virtbus,
struct iommu_table *tbl);
-#endif /* _PLATFORMS_ISERIES_IOMMU_H */
+#endif /* _ASM_POWERPC_ISERIES_IOMMU_H */
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
index a87aed00d61f..5a5c3b5ab1e0 100644
--- a/include/asm-powerpc/kdump.h
+++ b/include/asm-powerpc/kdump.h
@@ -1,13 +1,38 @@
#ifndef _PPC64_KDUMP_H
#define _PPC64_KDUMP_H
+/* Kdump kernel runs at 32 MB, change at your peril. */
+#define KDUMP_KERNELBASE 0x2000000
+
/* How many bytes to reserve at zero for kdump. The reserve limit should
- * be greater or equal to the trampoline's end address. */
+ * be greater than or equal to the trampoline's end address.
+ * Reserve to the end of the FWNMI area; see head_64.S. */
#define KDUMP_RESERVE_LIMIT 0x8000
+#ifdef CONFIG_CRASH_DUMP
+
+#define PHYSICAL_START KDUMP_KERNELBASE
#define KDUMP_TRAMPOLINE_START 0x0100
#define KDUMP_TRAMPOLINE_END 0x3000
-extern void kdump_setup(void);
+#else /* !CONFIG_CRASH_DUMP */
+
+#define PHYSICAL_START 0x0
+
+#endif /* CONFIG_CRASH_DUMP */
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_CRASH_DUMP
+
+extern void reserve_kdump_trampoline(void);
+extern void setup_kdump_trampoline(void);
+
+#else /* !CONFIG_CRASH_DUMP */
+
+static inline void reserve_kdump_trampoline(void) { ; }
+static inline void setup_kdump_trampoline(void) { ; }
+
+#endif /* CONFIG_CRASH_DUMP */
+#endif /* __ASSEMBLY__ */
#endif /* __PPC64_KDUMP_H */
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index 6a2af2f6853b..efe8872ec583 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -31,9 +31,10 @@
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif
+#ifndef __ASSEMBLY__
+
#ifdef CONFIG_KEXEC
-#ifndef __ASSEMBLY__
#ifdef __powerpc64__
/*
* This function is responsible for capturing register states if coming
@@ -123,8 +124,19 @@ extern int default_machine_kexec_prepare(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern void machine_kexec_simple(struct kimage *image);
+extern int overlaps_crashkernel(unsigned long start, unsigned long size);
+extern void reserve_crashkernel(void);
+
+#else /* !CONFIG_KEXEC */
+
+static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+ return 0;
+}
+
+static inline void reserve_crashkernel(void) { ; }
-#endif /* ! __ASSEMBLY__ */
#endif /* CONFIG_KEXEC */
+#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 3e7d37aa4a6d..73db1f71329d 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -237,6 +237,11 @@ struct machdep_calls {
*/
void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
+
+#ifdef CONFIG_PCI_MSI
+ int (*enable_msi)(struct pci_dev *pdev);
+ void (*disable_msi)(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_MSI */
};
extern void power4_idle(void);
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 31f721994bd8..3a5ebe229af5 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -96,6 +96,8 @@ extern char initial_stab[];
#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
+#define HPTE_R_C ASM_CONST(0x0000000000000080)
+#define HPTE_R_R ASM_CONST(0x0000000000000100)
/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
@@ -163,6 +165,16 @@ struct mmu_psize_def
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
#ifdef CONFIG_HUGETLB_PAGE
/*
@@ -254,6 +266,7 @@ extern long iSeries_hpte_insert(unsigned long hpte_group,
extern void stabs_alloc(void);
extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);
#endif /* __ASSEMBLY__ */
@@ -357,9 +370,12 @@ typedef unsigned long mm_context_id_t;
typedef struct {
mm_context_id_t id;
+ u16 user_psize; /* page size index */
+ u16 sllp; /* SLB entry page size encoding */
#ifdef CONFIG_HUGETLB_PAGE
u16 low_htlb_areas, high_htlb_areas;
#endif
+ unsigned long vdso_base;
} mm_context_t;
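
A hedged sketch of the policy the mmu_ci_restrictions comment describes (entirely illustrative; only the mmu_* variables, the mm_context_t fields and slb_flush_and_rebolt() come from this diff):

/* If the CPU lacks 64k cache-inhibited pages (mmu_ci_restrictions != 0),
 * demote a 64k-page mm to 4k user pages the first time it creates a
 * cache-inhibited mapping.  The real policy lives in the hash fault
 * path, not in a helper like this. */
static void example_demote_for_ci_mapping(struct mm_struct *mm)
{
	if (!mmu_ci_restrictions || mm->context.user_psize != MMU_PAGE_64K)
		return;

	mm->context.user_psize = MMU_PAGE_4K;
	/* assumption: the psize table carries the matching SLB encoding */
	mm->context.sllp = mmu_psize_defs[MMU_PAGE_4K].sllp;
	/* the running CPU would then want slb_flush_and_rebolt() */
}
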
diff --git a/include/asm-powerpc/mmu_context.h b/include/asm-powerpc/mmu_context.h
index 1b8a25fd48f3..8c6b1a6d944f 100644
--- a/include/asm-powerpc/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -20,16 +20,9 @@
* 2 of the License, or (at your option) any later version.
*/
-/*
- * Getting into a kernel thread, there is no valid user segment, mark
- * paca->pgdir NULL so that SLB miss on user addresses will fault
- */
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
-#ifdef CONFIG_PPC_64K_PAGES
- get_paca()->pgdir = NULL;
-#endif /* CONFIG_PPC_64K_PAGES */
}
#define NO_CONTEXT 0
@@ -52,13 +45,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpu_set(smp_processor_id(), next->cpu_vm_mask);
/* No need to flush userspace segments if the mm doesn't change */
-#ifdef CONFIG_PPC_64K_PAGES
- if (prev == next && get_paca()->pgdir == next->pgd)
- return;
-#else
if (prev == next)
return;
-#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
diff --git a/include/asm-powerpc/mpc86xx.h b/include/asm-powerpc/mpc86xx.h
new file mode 100644
index 000000000000..d0a6718d188b
--- /dev/null
+++ b/include/asm-powerpc/mpc86xx.h
@@ -0,0 +1,47 @@
+/*
+ * MPC86xx definitions
+ *
+ * Author: Jeff Brown
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_MPC86xx_H__
+#define __ASM_POWERPC_MPC86xx_H__
+
+#include <linux/config.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC_86xx
+
+#ifdef CONFIG_MPC8641_HPCN
+#include <platforms/86xx/mpc8641_hpcn.h>
+#endif
+
+#define _IO_BASE isa_io_base
+#define _ISA_MEM_BASE isa_mem_base
+#ifdef CONFIG_PCI
+#define PCI_DRAM_OFFSET pci_dram_offset
+#else
+#define PCI_DRAM_OFFSET 0
+#endif
+
+#define CPU0_BOOT_RELEASE 0x01000000
+#define CPU1_BOOT_RELEASE 0x02000000
+#define CPU_ALL_RELEASED (CPU0_BOOT_RELEASE | CPU1_BOOT_RELEASE)
+#define MCM_PORT_CONFIG_OFFSET 0x1010
+
+/* Offset from CCSRBAR */
+#define MPC86xx_OPENPIC_OFFSET (0x40000)
+#define MPC86xx_MCM_OFFSET (0x00000)
+#define MPC86xx_MCM_SIZE (0x02000)
+
+#endif /* CONFIG_PPC_86xx */
+#endif /* __ASM_POWERPC_MPC86xx_H__ */
+#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index 6b9e78142f4f..f0d22ac34b96 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -22,6 +22,10 @@
#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
#define MPIC_GREG_GLOBAL_CONF_1 0x00030
+#define MPIC_GREG_GLOBAL_CONF_1_SIE 0x08000000
+#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK 0x70000000
+#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(r) \
+ (((r) << 28) & MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK)
#define MPIC_GREG_VENDOR_0 0x00040
#define MPIC_GREG_VENDOR_1 0x00050
#define MPIC_GREG_VENDOR_2 0x00060
@@ -284,6 +288,12 @@ extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
/* This one gets to the primary mpic */
extern int mpic_get_irq(struct pt_regs *regs);
+/* Set the EPIC clock ratio */
+void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
+
+/* Enable/Disable EPIC serial interrupt mode */
+void mpic_set_serial_int(struct mpic *mpic, int enable);
+
/* global mpic for pSeries */
extern struct mpic *pSeries_mpic;
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 3c6f644d49b4..2d4585f06209 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -78,11 +78,9 @@ struct paca_struct {
u64 exmc[10]; /* used for machine checks */
u64 exslb[10]; /* used for SLB/segment table misses
* on the linear mapping */
-#ifdef CONFIG_PPC_64K_PAGES
- pgd_t *pgdir;
-#endif /* CONFIG_PPC_64K_PAGES */
mm_context_t context;
+ u16 vmalloc_sllp;
u16 slb_cache[SLB_CACHE_ENTRIES];
u16 slb_cache_ptr;
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index f0469b961359..fb597b37c2a2 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -12,6 +12,7 @@
#ifdef __KERNEL__
#include <asm/asm-compat.h>
+#include <asm/kdump.h>
/*
* On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
@@ -51,13 +52,6 @@
* If you want to test if something's a kernel address, use is_kernel_addr().
*/
-#ifdef CONFIG_CRASH_DUMP
-/* Kdump kernel runs at 32 MB, change at your peril. */
-#define PHYSICAL_START 0x2000000
-#else
-#define PHYSICAL_START 0x0
-#endif
-
#define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START)
#define KERNELBASE (PAGE_OFFSET + PHYSICAL_START)
@@ -197,6 +191,9 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *p);
extern int page_is_ram(unsigned long pfn);
+struct vm_area_struct;
+extern const char *arch_vma_name(struct vm_area_struct *vma);
+
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index 38de92d41a14..4f55573762bb 100644
--- a/include/asm-powerpc/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -6,6 +6,7 @@
#include <asm-ppc/pci-bridge.h>
#else
+#include <linux/config.h>
#include <linux/pci.h>
#include <linux/list.h>
@@ -22,6 +23,7 @@
struct pci_controller {
struct pci_bus *bus;
char is_dynamic;
+ int node;
void *arch_data;
struct list_head list_node;
@@ -78,12 +80,6 @@ struct pci_dn {
struct iommu_table *iommu_table; /* for phb's or bridges */
struct pci_dev *pcidev; /* back-pointer to the pci device */
struct device_node *node; /* back-pointer to the device_node */
-#ifdef CONFIG_PPC_ISERIES
- struct list_head Device_List;
- int Irq; /* Assigned IRQ */
- int Flags; /* Possible flags(disable/bist)*/
- u8 LogicalSlot; /* Hv Slot Index for Tces */
-#endif
u32 config_space[16]; /* saved PCI config space */
};
@@ -171,6 +167,12 @@ static inline unsigned long pci_address_to_pio(phys_addr_t address)
#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
+#ifdef CONFIG_NUMA
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE))
+#else
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = -1)
+#endif
+
#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif
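
A sketch of how platform PCI setup code might use the new node field and PHB_SET_NODE() (the surrounding function is hypothetical; of_node_to_nid() is the declaration from asm-powerpc/topology.h further down in this diff):

#include <linux/topology.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>

/* Record the NUMA node of a freshly created PHB so later allocations,
 * e.g. via the node-aware iommu_init_table() earlier in this diff, can
 * be made node-local. */
static void example_record_phb_node(struct device_node *dev,
				    struct pci_controller *phb)
{
	int nid = of_node_to_nid(dev);	/* -1 when no affinity info */

	PHB_SET_NODE(phb, nid);
}
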
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index b2e18629932a..e7036155672e 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -78,6 +78,8 @@
#define pte_iterate_hashed_end() } while(0)
+#define pte_pagesize_index(pte) MMU_PAGE_4K
+
/*
* 4-level page tables related bits
*/
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 653915014dcd..4b7126c53f37 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -90,6 +90,8 @@
#define pte_iterate_hashed_end() } while(0); } } while(0)
+#define pte_pagesize_index(pte) \
+ (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 964e312a1ffc..8dbf5ad8150f 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -46,8 +46,8 @@ struct mm_struct;
/*
* Define the address range of the vmalloc VM area.
*/
-#define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE (0x80000000000UL)
+#define VMALLOC_START ASM_CONST(0xD000000000000000)
+#define VMALLOC_SIZE ASM_CONST(0x80000000000)
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
@@ -412,12 +412,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
flush_tlb_pending();
}
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-
-#ifdef CONFIG_PPC_64K_PAGES
- if (mmu_virtual_psize != MMU_PAGE_64K)
- pte = __pte(pte_val(pte) | _PAGE_COMBO);
-#endif /* CONFIG_PPC_64K_PAGES */
-
*ptep = pte;
}
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 93f83efeb310..22e54a2a6604 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -149,11 +149,11 @@ struct thread_struct {
unsigned int val; /* Floating point status */
} fpscr;
int fpexc_mode; /* floating-point exception mode */
+ unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_PPC64
unsigned long start_tb; /* Start purr when proc switched in */
unsigned long accum_tb; /* Total accumulated purr for process */
#endif
- unsigned long vdso_base; /* base of the vDSO library */
unsigned long dabr; /* Data address breakpoint register */
#ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */
@@ -190,7 +190,7 @@ struct thread_struct {
.fs = KERNEL_DS, \
.fpr = {0}, \
.fpscr = { .val = 0, }, \
- .fpexc_mode = MSR_FE0|MSR_FE1, \
+ .fpexc_mode = 0, \
}
#endif
@@ -212,6 +212,18 @@ unsigned long get_wchan(struct task_struct *p);
extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
+#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
+#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))
+
+extern int get_endian(struct task_struct *tsk, unsigned long adr);
+extern int set_endian(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
+#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
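
These GET/SET_UNALIGN_CTL and GET/SET_ENDIAN hooks are what the generic prctl() path dispatches to. A userspace-side sketch (PR_SET_UNALIGN and its flags come from linux/prctl.h, not from this diff):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Ask for SIGBUS on unaligned accesses instead of a silent fixup;
	 * in the kernel this ends up in set_unalign_ctl() above. */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0)
		perror("prctl(PR_SET_UNALIGN)");
	return 0;
}
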
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index f4e2ca6fd53f..010d186d095b 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -229,7 +229,16 @@ extern int of_address_to_resource(struct device_node *dev, int index,
extern int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r);
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size);
+
extern void kdump_move_device_tree(void);
+/* CPU OF node matching */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_PROM_H */
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
index 9c550b314823..dc4cb9cc73a1 100644
--- a/include/asm-powerpc/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -229,13 +229,13 @@ do { \
#define PTRACE_GET_DEBUGREG 25
#define PTRACE_SET_DEBUGREG 26
-#ifdef __powerpc64__
/* Additional PTRACE requests implemented on PowerPC. */
#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
+#ifdef __powerpc64__
/* Calls to trace a 64bit program from a 32bit program */
#define PPC_PTRACE_PEEKTEXT_3264 0x95
#define PPC_PTRACE_PEEKDATA_3264 0x94
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index bd467bf5cf5a..cf73475a0c69 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -93,8 +93,8 @@
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
#ifdef CONFIG_PPC64
-#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
-#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
+#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF | MSR_HV
+#define MSR_KERNEL MSR_ | MSR_SF
#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
#define MSR_USER64 MSR_USER32 | MSR_SF
@@ -153,7 +153,7 @@
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
#define SPRN_DAR 0x013 /* Data Address Register */
-#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define DSISR_NOHPTE 0x40000000 /* no translation found */
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
@@ -258,16 +258,16 @@
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
-#define SPRN_HID6 0x3F9 /* BE HID 6 */
-#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
-#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
-#define SPRN_TSC_CELL 0x399 /* Thread switch control on Cell */
-#define TSC_CELL_DEC_ENABLE_0 0x400000 /* Decrementer Interrupt */
-#define TSC_CELL_DEC_ENABLE_1 0x200000 /* Decrementer Interrupt */
-#define TSC_CELL_EE_ENABLE 0x100000 /* External Interrupt */
-#define TSC_CELL_EE_BOOST 0x080000 /* External Interrupt Boost */
-#define SPRN_TSC 0x3FD /* Thread switch control on others */
-#define SPRN_TST 0x3FC /* Thread switch timeout on others */
+#define SPRN_HID6 0x3F9 /* BE HID 6 */
+#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
+#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
+#define SPRN_TSC_CELL 0x399 /* Thread switch control on Cell */
+#define TSC_CELL_DEC_ENABLE_0 0x400000 /* Decrementer Interrupt */
+#define TSC_CELL_DEC_ENABLE_1 0x200000 /* Decrementer Interrupt */
+#define TSC_CELL_EE_ENABLE 0x100000 /* External Interrupt */
+#define TSC_CELL_EE_BOOST 0x080000 /* External Interrupt Boost */
+#define SPRN_TSC 0x3FD /* Thread switch control on others */
+#define SPRN_TST 0x3FC /* Thread switch timeout on others */
#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
@@ -362,7 +362,7 @@
#endif
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
-#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
+#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
#define SPRN_PVR 0x11F /* Processor Version Register */
#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
@@ -386,6 +386,8 @@
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
+#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
#ifndef SPRN_SVR
#define SPRN_SVR 0x11E /* System Version Register */
@@ -443,6 +445,10 @@
#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define POWER6_MMCRA_SIHV 0x0000040000000000ULL
+#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
+#define POWER6_MMCRA_THRM 0x00000020UL
+#define POWER6_MMCRA_OTHER 0x0000000EUL
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
@@ -495,6 +501,19 @@
#define MMCR0_PMC2_LOADMISSTIME 0x5
#endif
+/*
+ * An mtfsf instruction with the L bit set. On CPUs that support it,
+ * the full 64 bits of the FPSCR are restored; on other CPUs the L bit
+ * is ignored.
+ *
+ * Until binutils gets the new form of mtfsf, hardwire the instruction.
+ */
+#ifdef CONFIG_PPC64
+#define MTFSF_L(REG) \
+ .long (0xfc00058e | ((0xff) << 17) | ((REG) << 11) | (1 << 25))
+#else
+#define MTFSF_L(REG) mtfsf 0xff, (REG)
+#endif
+
/* Processor Version Register (PVR) field extraction */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
@@ -559,20 +578,20 @@
/* 64-bit processors */
/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
-#define PV_NORTHSTAR 0x0033
-#define PV_PULSAR 0x0034
-#define PV_POWER4 0x0035
-#define PV_ICESTAR 0x0036
-#define PV_SSTAR 0x0037
-#define PV_POWER4p 0x0038
+#define PV_NORTHSTAR 0x0033
+#define PV_PULSAR 0x0034
+#define PV_POWER4 0x0035
+#define PV_ICESTAR 0x0036
+#define PV_SSTAR 0x0037
+#define PV_POWER4p 0x0038
#define PV_970 0x0039
-#define PV_POWER5 0x003A
+#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
#define PV_970FX 0x003C
-#define PV_630 0x0040
-#define PV_630p 0x0041
-#define PV_970MP 0x0044
-#define PV_BE 0x0070
+#define PV_630 0x0040
+#define PV_630p 0x0041
+#define PV_970MP 0x0044
+#define PV_BE 0x0070
/*
* Number of entries in the SLB. If this ever changes we should handle
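
For reference, expanding the hardwired MTFSF_L() encoding above for one register (arithmetic derived purely from the macro):

/* MTFSF_L(1) on a PPC64 build emits:
 *   .long (0xfc00058e | (0xff << 17) | (1 << 11) | (1 << 25))
 * = .long (0xfc00058e | 0x01fe0000 | 0x00000800 | 0x02000000)
 * = .long 0xfffe0d8e
 * i.e. mtfsf with FLM = 0xff, fr1 as the source and the L bit set, so
 * the full 64-bit FPSCR is restored where the CPU supports it.  On
 * 32-bit builds the macro stays the plain "mtfsf 0xff, (REG)" form. */
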
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index f43c6835e62a..02e213e3d69f 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -24,6 +24,7 @@
#define RTAS_RMOBUF_MAX (64 * 1024)
/* RTAS return status codes */
+#define RTAS_NOT_SUSPENDABLE -9004
#define RTAS_BUSY -2 /* RTAS Busy */
#define RTAS_EXTENDED_DELAY_MIN 9900
#define RTAS_EXTENDED_DELAY_MAX 9905
@@ -177,12 +178,8 @@ extern unsigned long rtas_get_boot_time(void);
extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
-/* Given an RTAS status code of 9900..9905 compute the hinted delay */
-unsigned int rtas_extended_busy_delay_time(int status);
-static inline int rtas_is_extended_busy(int status)
-{
- return status >= 9900 && status <= 9909;
-}
+extern unsigned int rtas_busy_delay_time(int status);
+extern unsigned int rtas_busy_delay(int status);
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 95713f397357..9609d3ee8798 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -24,8 +24,8 @@
#define _SPU_H
#ifdef __KERNEL__
-#include <linux/kref.h>
#include <linux/workqueue.h>
+#include <linux/sysdev.h>
#define LS_SIZE (256 * 1024)
#define LS_ADDR_MASK (LS_SIZE - 1)
@@ -122,7 +122,6 @@ struct spu {
u64 flags;
u64 dar;
u64 dsisr;
- struct kref kref;
size_t ls_size;
unsigned int slb_replace;
struct mm_struct *mm;
@@ -134,7 +133,6 @@ struct spu {
int class_0_pending;
spinlock_t register_lock;
- u32 stop_code;
void (* wbox_callback)(struct spu *spu);
void (* ibox_callback)(struct spu *spu);
void (* stop_callback)(struct spu *spu);
@@ -143,6 +141,8 @@ struct spu {
char irq_c0[8];
char irq_c1[8];
char irq_c2[8];
+
+ struct sys_device sysdev;
};
struct spu *spu_alloc(void);
@@ -181,29 +181,6 @@ static inline void unregister_spu_syscalls(struct spufs_calls *calls)
#endif /* MODULE */
-/* access to priv1 registers */
-void spu_int_mask_and(struct spu *spu, int class, u64 mask);
-void spu_int_mask_or(struct spu *spu, int class, u64 mask);
-void spu_int_mask_set(struct spu *spu, int class, u64 mask);
-u64 spu_int_mask_get(struct spu *spu, int class);
-void spu_int_stat_clear(struct spu *spu, int class, u64 stat);
-u64 spu_int_stat_get(struct spu *spu, int class);
-void spu_int_route_set(struct spu *spu, u64 route);
-u64 spu_mfc_dar_get(struct spu *spu);
-u64 spu_mfc_dsisr_get(struct spu *spu);
-void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr);
-void spu_mfc_sdr_set(struct spu *spu, u64 sdr);
-void spu_mfc_sr1_set(struct spu *spu, u64 sr1);
-u64 spu_mfc_sr1_get(struct spu *spu);
-void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id);
-u64 spu_mfc_tclass_id_get(struct spu *spu);
-void spu_tlb_invalidate(struct spu *spu);
-void spu_resource_allocation_groupID_set(struct spu *spu, u64 id);
-u64 spu_resource_allocation_groupID_get(struct spu *spu);
-void spu_resource_allocation_enable_set(struct spu *spu, u64 enable);
-u64 spu_resource_allocation_enable_get(struct spu *spu);
-
-
/*
* This defines the Local Store, Problem Area and Privlege Area of an SPU.
*/
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index ba18d7d4dde2..964c2d38ccb7 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -86,10 +86,18 @@ struct spu_lscsa {
struct spu_reg128 event_mask;
struct spu_reg128 srr0;
struct spu_reg128 stopped_status;
- struct spu_reg128 pad[119]; /* 'ls' must be page-aligned. */
- unsigned char ls[LS_SIZE];
+
+ /*
+ * 'ls' must be page-aligned on all configurations.
+ * Since this structure is also used by SPU-side code and we
+ * don't want to rely on spu-gcc being installed to build the
+ * kernel, make it 64k-page aligned for now.
+ */
+ unsigned char ls[LS_SIZE] __attribute__((aligned(65536)));
};
+#ifndef __SPU__
/*
* struct spu_problem_collapsed - condensed problem state area, w/o pads.
*/
@@ -250,6 +258,7 @@ extern int spu_restore(struct spu_state *new, struct spu *spu);
extern int spu_switch(struct spu_state *prev, struct spu_state *new,
struct spu *spu);
+#endif /* !__SPU__ */
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
#endif /* _SPU_CSA_H_ */
diff --git a/include/asm-powerpc/spu_priv1.h b/include/asm-powerpc/spu_priv1.h
new file mode 100644
index 000000000000..300c458b6d06
--- /dev/null
+++ b/include/asm-powerpc/spu_priv1.h
@@ -0,0 +1,182 @@
+/*
+ * Defines an spu hypervisor abstraction layer.
+ *
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if !defined(_SPU_PRIV1_H)
+#define _SPU_PRIV1_H
+#if defined(__KERNEL__)
+
+struct spu;
+
+/* access to priv1 registers */
+
+struct spu_priv1_ops
+{
+ void (*int_mask_and) (struct spu *spu, int class, u64 mask);
+ void (*int_mask_or) (struct spu *spu, int class, u64 mask);
+ void (*int_mask_set) (struct spu *spu, int class, u64 mask);
+ u64 (*int_mask_get) (struct spu *spu, int class);
+ void (*int_stat_clear) (struct spu *spu, int class, u64 stat);
+ u64 (*int_stat_get) (struct spu *spu, int class);
+ void (*cpu_affinity_set) (struct spu *spu, int cpu);
+ u64 (*mfc_dar_get) (struct spu *spu);
+ u64 (*mfc_dsisr_get) (struct spu *spu);
+ void (*mfc_dsisr_set) (struct spu *spu, u64 dsisr);
+ void (*mfc_sdr_set) (struct spu *spu, u64 sdr);
+ void (*mfc_sr1_set) (struct spu *spu, u64 sr1);
+ u64 (*mfc_sr1_get) (struct spu *spu);
+ void (*mfc_tclass_id_set) (struct spu *spu, u64 tclass_id);
+ u64 (*mfc_tclass_id_get) (struct spu *spu);
+ void (*tlb_invalidate) (struct spu *spu);
+ void (*resource_allocation_groupID_set) (struct spu *spu, u64 id);
+ u64 (*resource_allocation_groupID_get) (struct spu *spu);
+ void (*resource_allocation_enable_set) (struct spu *spu, u64 enable);
+ u64 (*resource_allocation_enable_get) (struct spu *spu);
+};
+
+extern const struct spu_priv1_ops* spu_priv1_ops;
+
+static inline void
+spu_int_mask_and (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_and(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_or (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_or(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_set (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_set(spu, class, mask);
+}
+
+static inline u64
+spu_int_mask_get (struct spu *spu, int class)
+{
+ return spu_priv1_ops->int_mask_get(spu, class);
+}
+
+static inline void
+spu_int_stat_clear (struct spu *spu, int class, u64 stat)
+{
+ spu_priv1_ops->int_stat_clear(spu, class, stat);
+}
+
+static inline u64
+spu_int_stat_get (struct spu *spu, int class)
+{
+ return spu_priv1_ops->int_stat_get (spu, class);
+}
+
+static inline void
+spu_cpu_affinity_set (struct spu *spu, int cpu)
+{
+ spu_priv1_ops->cpu_affinity_set(spu, cpu);
+}
+
+static inline u64
+spu_mfc_dar_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_dar_get(spu);
+}
+
+static inline u64
+spu_mfc_dsisr_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_dsisr_get(spu);
+}
+
+static inline void
+spu_mfc_dsisr_set (struct spu *spu, u64 dsisr)
+{
+ spu_priv1_ops->mfc_dsisr_set(spu, dsisr);
+}
+
+static inline void
+spu_mfc_sdr_set (struct spu *spu, u64 sdr)
+{
+ spu_priv1_ops->mfc_sdr_set(spu, sdr);
+}
+
+static inline void
+spu_mfc_sr1_set (struct spu *spu, u64 sr1)
+{
+ spu_priv1_ops->mfc_sr1_set(spu, sr1);
+}
+
+static inline u64
+spu_mfc_sr1_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_sr1_get(spu);
+}
+
+static inline void
+spu_mfc_tclass_id_set (struct spu *spu, u64 tclass_id)
+{
+ spu_priv1_ops->mfc_tclass_id_set(spu, tclass_id);
+}
+
+static inline u64
+spu_mfc_tclass_id_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_tclass_id_get(spu);
+}
+
+static inline void
+spu_tlb_invalidate (struct spu *spu)
+{
+ spu_priv1_ops->tlb_invalidate(spu);
+}
+
+static inline void
+spu_resource_allocation_groupID_set (struct spu *spu, u64 id)
+{
+ spu_priv1_ops->resource_allocation_groupID_set(spu, id);
+}
+
+static inline u64
+spu_resource_allocation_groupID_get (struct spu *spu)
+{
+ return spu_priv1_ops->resource_allocation_groupID_get(spu);
+}
+
+static inline void
+spu_resource_allocation_enable_set (struct spu *spu, u64 enable)
+{
+ spu_priv1_ops->resource_allocation_enable_set(spu, enable);
+}
+
+static inline u64
+spu_resource_allocation_enable_get (struct spu *spu)
+{
+ return spu_priv1_ops->resource_allocation_enable_get(spu);
+}
+
+/* The declarations that follow are put here for convenience
+ * and are only intended to be used by the platform setup code
+ * when initializing spu_priv1_ops.
+ */
+
+extern const struct spu_priv1_ops spu_priv1_mmio_ops;
+
+#endif /* __KERNEL__ */
+#endif
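
A hedged sketch of how platform setup code could select a priv1 backend through this indirection (only spu_priv1_ops and spu_priv1_mmio_ops come from the header; the function and the notion of a second, hypervisor-based backend are illustrative):

#include <asm/spu_priv1.h>

/* The single definition of the ops pointer lives in platform code;
 * every spu_*() wrapper above dispatches through it. */
const struct spu_priv1_ops *spu_priv1_ops;

static void example_init_spu_priv1(int running_on_hypervisor)
{
	if (!running_on_hypervisor)
		spu_priv1_ops = &spu_priv1_mmio_ops;	/* direct MMIO access */
	/* else: a hypervisor platform would install its own ops table that
	 * turns these operations into hypervisor calls. */
}
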
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
new file mode 100644
index 000000000000..eac85ce101b6
--- /dev/null
+++ b/include/asm-powerpc/systbl.h
@@ -0,0 +1,306 @@
+/*
+ * List of powerpc syscalls. For the meaning of the _SPU suffix see
+ * arch/powerpc/platforms/cell/spu_callbacks.c
+ */
+
+SYSCALL(restart_syscall)
+SYSCALL(exit)
+PPC_SYS(fork)
+SYSCALL_SPU(read)
+SYSCALL_SPU(write)
+COMPAT_SYS_SPU(open)
+SYSCALL_SPU(close)
+COMPAT_SYS_SPU(waitpid)
+COMPAT_SYS_SPU(creat)
+SYSCALL_SPU(link)
+SYSCALL_SPU(unlink)
+COMPAT_SYS(execve)
+SYSCALL_SPU(chdir)
+COMPAT_SYS_SPU(time)
+SYSCALL_SPU(mknod)
+SYSCALL_SPU(chmod)
+SYSCALL_SPU(lchown)
+SYSCALL(ni_syscall)
+OLDSYS(stat)
+SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek)
+SYSCALL_SPU(getpid)
+COMPAT_SYS(mount)
+SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
+SYSCALL_SPU(setuid)
+SYSCALL_SPU(getuid)
+COMPAT_SYS_SPU(stime)
+COMPAT_SYS(ptrace)
+SYSCALL_SPU(alarm)
+OLDSYS(fstat)
+COMPAT_SYS(pause)
+COMPAT_SYS(utime)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(access)
+COMPAT_SYS_SPU(nice)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(sync)
+COMPAT_SYS_SPU(kill)
+SYSCALL_SPU(rename)
+COMPAT_SYS_SPU(mkdir)
+SYSCALL_SPU(rmdir)
+SYSCALL_SPU(dup)
+SYSCALL_SPU(pipe)
+COMPAT_SYS_SPU(times)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(brk)
+SYSCALL_SPU(setgid)
+SYSCALL_SPU(getgid)
+SYSCALL(signal)
+SYSCALL_SPU(geteuid)
+SYSCALL_SPU(getegid)
+SYSCALL(acct)
+SYSCALL(umount)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(ioctl)
+COMPAT_SYS_SPU(fcntl)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(setpgid)
+SYSCALL(ni_syscall)
+SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
+COMPAT_SYS_SPU(umask)
+SYSCALL_SPU(chroot)
+SYSCALL(ustat)
+SYSCALL_SPU(dup2)
+SYSCALL_SPU(getppid)
+SYSCALL_SPU(getpgrp)
+SYSCALL_SPU(setsid)
+SYS32ONLY(sigaction)
+SYSCALL_SPU(sgetmask)
+COMPAT_SYS_SPU(ssetmask)
+SYSCALL_SPU(setreuid)
+SYSCALL_SPU(setregid)
+SYS32ONLY(sigsuspend)
+COMPAT_SYS(sigpending)
+COMPAT_SYS_SPU(sethostname)
+COMPAT_SYS_SPU(setrlimit)
+COMPAT_SYS(old_getrlimit)
+COMPAT_SYS_SPU(getrusage)
+COMPAT_SYS_SPU(gettimeofday)
+COMPAT_SYS_SPU(settimeofday)
+COMPAT_SYS_SPU(getgroups)
+COMPAT_SYS_SPU(setgroups)
+SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
+SYSCALL_SPU(symlink)
+OLDSYS(lstat)
+COMPAT_SYS_SPU(readlink)
+SYSCALL(uselib)
+SYSCALL(swapon)
+SYSCALL(reboot)
+SYSX(sys_ni_syscall,old32_readdir,old_readdir)
+SYSCALL_SPU(mmap)
+SYSCALL_SPU(munmap)
+SYSCALL_SPU(truncate)
+SYSCALL_SPU(ftruncate)
+SYSCALL_SPU(fchmod)
+SYSCALL_SPU(fchown)
+COMPAT_SYS_SPU(getpriority)
+COMPAT_SYS_SPU(setpriority)
+SYSCALL(ni_syscall)
+COMPAT_SYS(statfs)
+COMPAT_SYS(fstatfs)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(socketcall)
+COMPAT_SYS_SPU(syslog)
+COMPAT_SYS_SPU(setitimer)
+COMPAT_SYS_SPU(getitimer)
+COMPAT_SYS_SPU(newstat)
+COMPAT_SYS_SPU(newlstat)
+COMPAT_SYS_SPU(newfstat)
+SYSX(sys_ni_syscall,sys_uname,sys_uname)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(vhangup)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(wait4)
+SYSCALL(swapoff)
+COMPAT_SYS_SPU(sysinfo)
+COMPAT_SYS(ipc)
+SYSCALL_SPU(fsync)
+SYS32ONLY(sigreturn)
+PPC_SYS(clone)
+COMPAT_SYS_SPU(setdomainname)
+PPC_SYS_SPU(newuname)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(adjtimex)
+SYSCALL_SPU(mprotect)
+SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
+SYSCALL(ni_syscall)
+SYSCALL(init_module)
+SYSCALL(delete_module)
+SYSCALL(ni_syscall)
+SYSCALL(quotactl)
+COMPAT_SYS_SPU(getpgid)
+SYSCALL_SPU(fchdir)
+SYSCALL_SPU(bdflush)
+COMPAT_SYS(sysfs)
+SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(setfsuid)
+SYSCALL_SPU(setfsgid)
+SYSCALL_SPU(llseek)
+COMPAT_SYS_SPU(getdents)
+SYSX_SPU(sys_select,ppc32_select,ppc_select)
+SYSCALL_SPU(flock)
+SYSCALL_SPU(msync)
+COMPAT_SYS_SPU(readv)
+COMPAT_SYS_SPU(writev)
+COMPAT_SYS_SPU(getsid)
+SYSCALL_SPU(fdatasync)
+COMPAT_SYS(sysctl)
+SYSCALL_SPU(mlock)
+SYSCALL_SPU(munlock)
+SYSCALL_SPU(mlockall)
+SYSCALL_SPU(munlockall)
+COMPAT_SYS_SPU(sched_setparam)
+COMPAT_SYS_SPU(sched_getparam)
+COMPAT_SYS_SPU(sched_setscheduler)
+COMPAT_SYS_SPU(sched_getscheduler)
+SYSCALL_SPU(sched_yield)
+COMPAT_SYS_SPU(sched_get_priority_max)
+COMPAT_SYS_SPU(sched_get_priority_min)
+COMPAT_SYS_SPU(sched_rr_get_interval)
+COMPAT_SYS_SPU(nanosleep)
+SYSCALL_SPU(mremap)
+SYSCALL_SPU(setresuid)
+SYSCALL_SPU(getresuid)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(poll)
+COMPAT_SYS(nfsservctl)
+SYSCALL_SPU(setresgid)
+SYSCALL_SPU(getresgid)
+COMPAT_SYS_SPU(prctl)
+COMPAT_SYS(rt_sigreturn)
+COMPAT_SYS(rt_sigaction)
+COMPAT_SYS(rt_sigprocmask)
+COMPAT_SYS(rt_sigpending)
+COMPAT_SYS(rt_sigtimedwait)
+COMPAT_SYS(rt_sigqueueinfo)
+COMPAT_SYS(rt_sigsuspend)
+COMPAT_SYS_SPU(pread64)
+COMPAT_SYS_SPU(pwrite64)
+SYSCALL_SPU(chown)
+SYSCALL_SPU(getcwd)
+SYSCALL_SPU(capget)
+SYSCALL_SPU(capset)
+COMPAT_SYS(sigaltstack)
+SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+PPC_SYS(vfork)
+COMPAT_SYS_SPU(getrlimit)
+COMPAT_SYS_SPU(readahead)
+SYS32ONLY(mmap2)
+SYS32ONLY(truncate64)
+SYS32ONLY(ftruncate64)
+SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
+SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
+SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
+SYSCALL(pciconfig_read)
+SYSCALL(pciconfig_write)
+SYSCALL(pciconfig_iobase)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(getdents64)
+SYSCALL_SPU(pivot_root)
+SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
+SYSCALL_SPU(madvise)
+SYSCALL_SPU(mincore)
+SYSCALL_SPU(gettid)
+SYSCALL_SPU(tkill)
+SYSCALL_SPU(setxattr)
+SYSCALL_SPU(lsetxattr)
+SYSCALL_SPU(fsetxattr)
+SYSCALL_SPU(getxattr)
+SYSCALL_SPU(lgetxattr)
+SYSCALL_SPU(fgetxattr)
+SYSCALL_SPU(listxattr)
+SYSCALL_SPU(llistxattr)
+SYSCALL_SPU(flistxattr)
+SYSCALL_SPU(removexattr)
+SYSCALL_SPU(lremovexattr)
+SYSCALL_SPU(fremovexattr)
+COMPAT_SYS_SPU(futex)
+COMPAT_SYS_SPU(sched_setaffinity)
+COMPAT_SYS_SPU(sched_getaffinity)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYS32ONLY(sendfile64)
+COMPAT_SYS_SPU(io_setup)
+SYSCALL_SPU(io_destroy)
+COMPAT_SYS_SPU(io_getevents)
+COMPAT_SYS_SPU(io_submit)
+SYSCALL_SPU(io_cancel)
+SYSCALL(set_tid_address)
+SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
+SYSCALL(exit_group)
+SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
+SYSCALL_SPU(epoll_create)
+SYSCALL_SPU(epoll_ctl)
+SYSCALL_SPU(epoll_wait)
+SYSCALL_SPU(remap_file_pages)
+SYSX_SPU(sys_timer_create,compat_sys_timer_create,sys_timer_create)
+COMPAT_SYS_SPU(timer_settime)
+COMPAT_SYS_SPU(timer_gettime)
+SYSCALL_SPU(timer_getoverrun)
+SYSCALL_SPU(timer_delete)
+COMPAT_SYS_SPU(clock_settime)
+COMPAT_SYS_SPU(clock_gettime)
+COMPAT_SYS_SPU(clock_getres)
+COMPAT_SYS_SPU(clock_nanosleep)
+SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
+COMPAT_SYS_SPU(tgkill)
+COMPAT_SYS_SPU(utimes)
+COMPAT_SYS_SPU(statfs64)
+COMPAT_SYS_SPU(fstatfs64)
+SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
+PPC_SYS_SPU(rtas)
+OLDSYS(debug_setcontext)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS(mbind)
+COMPAT_SYS(get_mempolicy)
+COMPAT_SYS(set_mempolicy)
+COMPAT_SYS(mq_open)
+SYSCALL(mq_unlink)
+COMPAT_SYS(mq_timedsend)
+COMPAT_SYS(mq_timedreceive)
+COMPAT_SYS(mq_notify)
+COMPAT_SYS(mq_getsetattr)
+COMPAT_SYS(kexec_load)
+COMPAT_SYS(add_key)
+COMPAT_SYS(request_key)
+COMPAT_SYS(keyctl)
+COMPAT_SYS(waitid)
+COMPAT_SYS(ioprio_set)
+COMPAT_SYS(ioprio_get)
+SYSCALL(inotify_init)
+SYSCALL(inotify_add_watch)
+SYSCALL(inotify_rm_watch)
+SYSCALL(spu_run)
+SYSCALL(spu_create)
+COMPAT_SYS(pselect6)
+COMPAT_SYS(ppoll)
+SYSCALL_SPU(unshare)
+SYSCALL_SPU(splice)
+SYSCALL_SPU(tee)
+SYSCALL_SPU(vmsplice)
+COMPAT_SYS_SPU(openat)
+SYSCALL_SPU(mkdirat)
+SYSCALL_SPU(mknodat)
+SYSCALL_SPU(fchownat)
+COMPAT_SYS_SPU(futimesat)
+SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64)
+SYSCALL_SPU(unlinkat)
+SYSCALL_SPU(renameat)
+SYSCALL_SPU(linkat)
+SYSCALL_SPU(symlinkat)
+SYSCALL_SPU(readlinkat)
+SYSCALL_SPU(fchmodat)
+SYSCALL_SPU(faccessat)
+COMPAT_SYS_SPU(get_robust_list)
+COMPAT_SYS_SPU(set_robust_list)
diff --git a/include/asm-powerpc/tce.h b/include/asm-powerpc/tce.h
index 6fa200ad7a7f..c9483adbf599 100644
--- a/include/asm-powerpc/tce.h
+++ b/include/asm-powerpc/tce.h
@@ -35,32 +35,15 @@
#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
#define TCE_PAGE_FACTOR (PAGE_SHIFT - TCE_SHIFT)
-
-/* tce_entry
- * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
- * abstracted so layout is irrelevant.
- */
-union tce_entry {
- unsigned long te_word;
- struct {
- unsigned int tb_cacheBits :6; /* Cache hash bits - not used */
- unsigned int tb_rsvd :6;
- unsigned long tb_rpn :40; /* Real page number */
- unsigned int tb_valid :1; /* Tce is valid (vb only) */
- unsigned int tb_allio :1; /* Tce is valid for all lps (vb only) */
- unsigned int tb_lpindex :8; /* LpIndex for user of TCE (vb only) */
- unsigned int tb_pciwr :1; /* Write allowed (pci only) */
- unsigned int tb_rdwr :1; /* Read allowed (pci), Write allowed (vb) */
- } te_bits;
-#define te_cacheBits te_bits.tb_cacheBits
-#define te_rpn te_bits.tb_rpn
-#define te_valid te_bits.tb_valid
-#define te_allio te_bits.tb_allio
-#define te_lpindex te_bits.tb_lpindex
-#define te_pciwr te_bits.tb_pciwr
-#define te_rdwr te_bits.tb_rdwr
-};
-
+#define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */
+
+#define TCE_RPN_MASK 0xfffffffffful /* 40-bit RPN (4K pages) */
+#define TCE_RPN_SHIFT 12
+#define TCE_VALID 0x800 /* TCE valid */
+#define TCE_ALLIO 0x400 /* TCE valid for all lpars */
+#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
+#define TCE_PCI_READ 0x1 /* read from PCI allowed */
+#define TCE_VB_WRITE 0x1 /* write from VB allowed */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TCE_H */
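
A minimal sketch of what the flat layout implies for users of this header (not part of the patch; build_tce() is an invented name): a TCE word is now composed arithmetically from the masks and shifts above rather than through the removed bitfield union.

#include <linux/types.h>

static inline u64 build_tce(unsigned long uaddr, int write)
{
	u64 rpn = __pa(uaddr) >> TCE_SHIFT;		/* real page number */
	u64 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

	tce |= TCE_PCI_READ;				/* reads always allowed */
	if (write)
		tce |= TCE_PCI_WRITE;
	return tce;
}
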
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 19c575f39164..92f3e5507d22 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -31,8 +31,13 @@ static inline int node_to_first_cpu(int node)
int of_node_to_nid(struct device_node *device);
-#define pcibus_to_node(node) (-1)
-#define pcibus_to_cpumask(bus) (cpu_online_map)
+struct pci_bus;
+extern int pcibus_to_node(struct pci_bus *bus);
+
+#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL : \
+ node_to_cpumask(pcibus_to_node(bus)) \
+ )
/* sched_domains SD_NODE_INIT for PPC64 machines */
#define SD_NODE_INIT (struct sched_domain) { \
diff --git a/include/asm-powerpc/udbg.h b/include/asm-powerpc/udbg.h
index 5c4236c342bb..19a1517ac43b 100644
--- a/include/asm-powerpc/udbg.h
+++ b/include/asm-powerpc/udbg.h
@@ -23,7 +23,8 @@ extern int udbg_write(const char *s, int n);
extern int udbg_read(char *buf, int buflen);
extern void register_early_udbg_console(void);
-extern void udbg_printf(const char *fmt, ...);
+extern void udbg_printf(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
extern void udbg_progress(char *s, unsigned short hex);
extern void udbg_init_uart(void __iomem *comport, unsigned int speed,
diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h
index be14c59846f9..dc9bd101ca14 100644
--- a/include/asm-powerpc/vio.h
+++ b/include/asm-powerpc/vio.h
@@ -63,32 +63,22 @@ struct vio_driver {
struct device_driver driver;
};
-struct vio_bus_ops {
- int (*match)(const struct vio_device_id *id, const struct vio_dev *dev);
- void (*unregister_device)(struct vio_dev *);
- void (*release_device)(struct device *);
-};
-
extern struct dma_mapping_ops vio_dma_ops;
extern struct bus_type vio_bus_type;
-extern struct vio_dev vio_bus_device;
extern int vio_register_driver(struct vio_driver *drv);
extern void vio_unregister_driver(struct vio_driver *drv);
-extern struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev);
extern void __devinit vio_unregister_device(struct vio_dev *dev);
-extern int vio_bus_init(struct vio_bus_ops *);
-
-#ifdef CONFIG_PPC_PSERIES
struct device_node;
extern struct vio_dev * __devinit vio_register_device_node(
struct device_node *node_vdev);
-extern struct vio_dev *vio_find_node(struct device_node *vnode);
-extern const void *vio_get_attribute(struct vio_dev *vdev, void *which,
+extern const void *vio_get_attribute(struct vio_dev *vdev, char *which,
int *length);
+#ifdef CONFIG_PPC_PSERIES
+extern struct vio_dev *vio_find_node(struct device_node *vnode);
extern int vio_enable_interrupts(struct vio_dev *dev);
extern int vio_disable_interrupts(struct vio_dev *dev);
#endif
diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
index 0a70b05b3afb..14584e505ed5 100644
--- a/include/asm-ppc/mmu.h
+++ b/include/asm-ppc/mmu.h
@@ -23,25 +23,18 @@ extern phys_addr_t fixup_bigphys_addr(phys_addr_t, phys_addr_t);
#define PHYS_FMT "%16Lx"
#endif
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
/* Hardware Page Table Entry */
typedef struct _PTE {
-#ifdef CONFIG_PPC64BRIDGE
- unsigned long long vsid:52;
- unsigned long api:5;
- unsigned long :5;
- unsigned long h:1;
- unsigned long v:1;
- unsigned long long rpn:52;
-#else /* CONFIG_PPC64BRIDGE */
unsigned long v:1; /* Entry is valid */
unsigned long vsid:24; /* Virtual segment identifier */
unsigned long h:1; /* Hash algorithm indicator */
unsigned long api:6; /* Abbreviated page index */
unsigned long rpn:20; /* Real (physical) page number */
-#endif /* CONFIG_PPC64BRIDGE */
unsigned long :3; /* Unused */
unsigned long r:1; /* Referenced */
unsigned long c:1; /* Changed */
@@ -82,11 +75,7 @@ typedef struct _P601_BATU { /* Upper part of BAT for 601 processor */
} P601_BATU;
typedef struct _BATU { /* Upper part of BAT (all except 601) */
-#ifdef CONFIG_PPC64BRIDGE
- unsigned long long bepi:47;
-#else /* CONFIG_PPC64BRIDGE */
unsigned long bepi:15; /* Effective page index (virtual address) */
-#endif /* CONFIG_PPC64BRIDGE */
unsigned long :4; /* Unused */
unsigned long bl:11; /* Block size mask */
unsigned long vs:1; /* Supervisor valid */
@@ -101,11 +90,7 @@ typedef struct _P601_BATL { /* Lower part of BAT for 601 processor */
} P601_BATL;
typedef struct _BATL { /* Lower part of BAT (all except 601) */
-#ifdef CONFIG_PPC64BRIDGE
- unsigned long long brpn:47;
-#else /* CONFIG_PPC64BRIDGE */
unsigned long brpn:15; /* Real page index (physical address) */
-#endif /* CONFIG_PPC64BRIDGE */
unsigned long :10; /* Unused */
unsigned long w:1; /* Write-thru cache */
unsigned long i:1; /* Cache inhibit */
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 94f2bf71310d..2bc8589cc451 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -70,7 +70,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#else
/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT ((mm_context_t) -1)
+#define NO_CONTEXT ((unsigned long) -1)
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
#endif
@@ -85,7 +85,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
* can be used for debugging on all processors (if you happen to have
* an Abatron).
*/
-extern void set_context(mm_context_t context, pgd_t *pgd);
+extern void set_context(unsigned long contextid, pgd_t *pgd);
/*
* Bitmap of contexts in use.
@@ -98,7 +98,7 @@ extern unsigned long context_map[];
* Its use is an optimization only, we can't rely on this context
* number to be free, but it usually will be.
*/
-extern mm_context_t next_mmu_context;
+extern unsigned long next_mmu_context;
/*
* If we don't have sufficient contexts to give one to every task
@@ -117,9 +117,9 @@ extern void steal_context(void);
*/
static inline void get_mmu_context(struct mm_struct *mm)
{
- mm_context_t ctx;
+ unsigned long ctx;
- if (mm->context != NO_CONTEXT)
+ if (mm->context.id != NO_CONTEXT)
return;
#ifdef FEW_CONTEXTS
while (atomic_dec_if_positive(&nr_free_contexts) < 0)
@@ -132,7 +132,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
ctx = 0;
}
next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context = ctx;
+ mm->context.id = ctx;
#ifdef FEW_CONTEXTS
context_mm[ctx] = mm;
#endif
@@ -141,7 +141,12 @@ static inline void get_mmu_context(struct mm_struct *mm)
/*
* Set up the context for a new address space.
*/
-#define init_new_context(tsk,mm) (((mm)->context = NO_CONTEXT), 0)
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = NO_CONTEXT;
+ mm->context.vdso_base = 0;
+ return 0;
+}
/*
* We're finished using the context for an address space.
@@ -149,9 +154,9 @@ static inline void get_mmu_context(struct mm_struct *mm)
static inline void destroy_context(struct mm_struct *mm)
{
preempt_disable();
- if (mm->context != NO_CONTEXT) {
- clear_bit(mm->context, context_map);
- mm->context = NO_CONTEXT;
+ if (mm->context.id != NO_CONTEXT) {
+ clear_bit(mm->context.id, context_map);
+ mm->context.id = NO_CONTEXT;
#ifdef FEW_CONTEXTS
atomic_inc(&nr_free_contexts);
#endif
@@ -179,7 +184,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Setup new userspace context */
get_mmu_context(next);
- set_context(next->context, next->pgd);
+ set_context(next->context.id, next->pgd);
}
#define deactivate_mm(tsk,mm) do { } while (0)
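
A sketch, assuming the usual vDSO setup path (record_vdso() is an invented name): because mm->context is now a structure rather than a bare number, arch code can stash the per-process vDSO mapping address next to the context id.

static void record_vdso(struct mm_struct *mm, unsigned long vdso_addr)
{
	/* context.id is still owned by get_mmu_context()/destroy_context() */
	mm->context.vdso_base = vdso_addr;
}
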
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index c25bdd9debf8..9b4851199c76 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -27,6 +27,9 @@
#if defined(CONFIG_MPC8555_CDS) || defined(CONFIG_MPC8548_CDS)
#include <platforms/85xx/mpc8555_cds.h>
#endif
+#ifdef CONFIG_MPC85xx_CDS
+#include <platforms/85xx/mpc85xx_cds.h>
+#endif
#ifdef CONFIG_MPC8560_ADS
#include <platforms/85xx/mpc8560_ads.h>
#endif
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 9cb83679836c..51fa7c662917 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -662,7 +662,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep)
+ __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
diff --git a/include/asm-s390/irq.h b/include/asm-s390/irq.h
index 916a1aa0b073..bd1a721f7aa2 100644
--- a/include/asm-s390/irq.h
+++ b/include/asm-s390/irq.h
@@ -21,10 +21,6 @@ enum interruption_class {
#define touch_nmi_watchdog() do { } while(0)
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-sparc/irq.h b/include/asm-sparc/irq.h
index f2d64537e29d..3141ddfea97d 100644
--- a/include/asm-sparc/irq.h
+++ b/include/asm-sparc/irq.h
@@ -181,8 +181,4 @@ extern struct sun4m_intregs *sun4m_interrupts;
#define SUN4M_INT_SBUS(x) (1 << (x+7))
#define SUN4M_INT_VME(x) (1 << (x))
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif
diff --git a/include/asm-v850/irq.h b/include/asm-v850/irq.h
index 44431152b36d..1bf096db8f4c 100644
--- a/include/asm-v850/irq.h
+++ b/include/asm-v850/irq.h
@@ -62,8 +62,6 @@ extern void disable_irq (unsigned int irq);
/* Disable an irq without waiting. */
extern void disable_irq_nosync (unsigned int irq);
-extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* !__ASSEMBLY__ */
#endif /* __V850_IRQ_H__ */
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index aa1c7b2e438c..2c95a319c056 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -162,6 +162,8 @@ extern int acpi_pci_disabled;
extern u8 x86_acpiid_to_apicid[];
+#define ARCH_HAS_POWER_INIT 1
+
extern int acpi_skip_timer_override;
#endif /*__KERNEL__*/
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index 5a48e9bcf218..1dd40067c67c 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -137,8 +137,6 @@
*/
#define u32 unsigned int
-#define lapic ((volatile struct local_apic *)APIC_BASE)
-
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 70bb9969766e..c38ebdf6f426 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -42,7 +42,6 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
#ifdef CONFIG_DISCONTIGMEM
#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
-#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
extern int pfn_valid(unsigned long pfn);
#endif
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index 1cc92fe02503..933ff11ece15 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -8,7 +8,6 @@ struct bootnode {
};
extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
-extern int pxm_to_node(int nid);
#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 0aff22bdbb21..94387c915e53 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -617,10 +617,12 @@ __SYSCALL(__NR_tee, sys_tee)
__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
#define __NR_vmsplice 278
__SYSCALL(__NR_vmsplice, sys_vmsplice)
+#define __NR_move_pages 279
+__SYSCALL(__NR_move_pages, sys_move_pages)
#ifdef __KERNEL__
-#define __NR_syscall_max __NR_vmsplice
+#define __NR_syscall_max __NR_move_pages
#ifndef __NO_STUBS
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
index bdc00ae9be48..03114f8d1e18 100644
--- a/include/asm-xtensa/checksum.h
+++ b/include/asm-xtensa/checksum.h
@@ -43,8 +43,7 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
- * If you use these functions directly please don't forget the
- * verify_area().
+ * If you use these functions directly please don't forget the access_ok().
*/
static inline
unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
index 3c02b0e033f0..abcd86dc5ab9 100644
--- a/include/asm-xtensa/rwsem.h
+++ b/include/asm-xtensa/rwsem.h
@@ -172,4 +172,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
-#endif /* _XTENSA_RWSEM_XADD_H */
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+#endif /* _XTENSA_RWSEM_H */
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
index 06a22b83ba17..88a64e1144d5 100644
--- a/include/asm-xtensa/uaccess.h
+++ b/include/asm-xtensa/uaccess.h
@@ -154,35 +154,6 @@
.Laccess_ok_\@:
.endm
-/*
- * verify_area determines whether a memory access is allowed. It's
- * mostly an unnecessary wrapper for access_ok, but we provide it as a
- * duplicate of the verify_area() C inline function below. See the
- * equivalent C version below for clarity.
- *
- * On error, verify_area branches to a label indicated by parameter
- * <error>. This implies that the macro falls through to the next
- * instruction on success.
- *
- * Note that we assume success is the common case, and we optimize the
- * branch fall-through case on success.
- *
- * On Entry:
- * <aa> register containing memory address
- * <as> register containing memory size
- * <at> temp register
- * <error> label to branch to on error; implies fall-through
- * macro on success
- * On Exit:
- * <aa> preserved
- * <as> preserved
- * <at> destroyed
- */
- .macro verify_area aa, as, at, sp, error
- access_ok \at, \aa, \as, \sp, \error
- .endm
-
-
#else /* __ASSEMBLY__ not defined */
#include <linux/sched.h>
@@ -211,11 +182,6 @@
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
-static inline int verify_area(int type, const void * addr, unsigned long size)
-{
- return access_ok(type,addr,size) ? 0 : -EFAULT;
-}
-
/*
* These are the main single-value transfer routines. They
* automatically use the right size if we just have the right pointer
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1cf0b91d05bd..90d6df1551ed 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -37,6 +37,7 @@
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_numa.h>
#include <asm/acpi.h>
@@ -407,10 +408,18 @@ void acpi_table_print_madt_entry (acpi_table_entry_header *madt);
void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
/* the following four functions are architecture-dependent */
+#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT
+#define NR_NODE_MEMBLKS MAX_NUMNODES
+#define acpi_numa_slit_init(slit) do {} while (0)
+#define acpi_numa_processor_affinity_init(pa) do {} while (0)
+#define acpi_numa_memory_affinity_init(ma) do {} while (0)
+#define acpi_numa_arch_fixup() do {} while (0)
+#else
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa);
void acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma);
void acpi_numa_arch_fixup(void);
+#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3457e7b97363..aafe82788b4e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -151,11 +151,9 @@ struct request {
void *elevator_private;
void *completion_data;
- unsigned short ioprio;
-
int rq_status; /* should split this into a few status bits */
- struct gendisk *rq_disk;
int errors;
+ struct gendisk *rq_disk;
unsigned long start_time;
/* Number of scatter-gather DMA addr+len pairs after
@@ -170,8 +168,9 @@ struct request {
*/
unsigned short nr_hw_segments;
+ unsigned short ioprio;
+
int tag;
- char *buffer;
int ref_count;
request_queue_t *q;
@@ -179,6 +178,7 @@ struct request {
struct completion *waiting;
void *special;
+ char *buffer;
/*
* when request is used as a packet command carrier
@@ -187,20 +187,14 @@ struct request {
unsigned char cmd[BLK_MAX_CDB];
unsigned int data_len;
- void *data;
-
unsigned int sense_len;
+ void *data;
void *sense;
unsigned int timeout;
int retries;
/*
- * For Power Management requests
- */
- struct request_pm_state *pm;
-
- /*
* completion callback. end_io_data should be folded in with waiting
*/
rq_end_io_fn *end_io;
@@ -241,6 +235,7 @@ enum rq_flag_bits {
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
__REQ_ORDERED_COLOR, /* is before or after barrier */
+ __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
__REQ_NR_BITS, /* stops here */
};
@@ -270,6 +265,7 @@ enum rq_flag_bits {
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
+#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
/*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index eb1a867ed245..a7e8cef73d15 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -90,9 +90,9 @@ struct blk_io_trace {
* The remap event
*/
struct blk_io_trace_remap {
- u32 device;
+ __be32 device;
u32 __pad;
- u64 sector;
+ __be64 sector;
};
enum {
@@ -224,7 +224,7 @@ static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
struct bio *bio, unsigned int pdu)
{
struct blk_trace *bt = q->blk_trace;
- u64 rpdu = cpu_to_be64(pdu);
+ __be64 rpdu = cpu_to_be64(pdu);
if (likely(!bt))
return;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index da2d107fe2cf..22866fa2d960 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -91,8 +91,8 @@ static inline void *alloc_remap(int nid, unsigned long size)
}
#endif
-extern unsigned long __initdata nr_kernel_pages;
-extern unsigned long __initdata nr_all_pages;
+extern unsigned long nr_kernel_pages;
+extern unsigned long nr_all_pages;
extern void *__init alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 1417de935057..dbb7769009be 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -3,31 +3,22 @@
*
* Copyright (C) Matt Helsley, IBM Corp. 2005
* Based on cn_fork.h by Nguyen Anh Quynh and Guillaume Thouvenin
- * Original copyright notice follows:
* Copyright (C) 2005 Nguyen Anh Quynh <aquynh@gmail.com>
* Copyright (C) 2005 Guillaume Thouvenin <guillaume.thouvenin@bull.net>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef CN_PROC_H
#define CN_PROC_H
#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/connector.h>
/*
* Userspace sends this enum to register with the kernel that it is listening
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index b3ecf8f71d97..7b5c5df5cb69 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -36,7 +36,7 @@ extern const struct file_operations coda_ioctl_operations;
/* operations shared over more than one file */
int coda_open(struct inode *i, struct file *f);
-int coda_flush(struct file *f);
+int coda_flush(struct file *f, fl_owner_t id);
int coda_release(struct inode *i, struct file *f);
int coda_permission(struct inode *inode, int mask, struct nameidata *nd);
int coda_revalidate_inode(struct dentry *);
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index d539262a8f89..98f6c52c152b 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -70,7 +70,7 @@ int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
unsigned int cmd, struct PioctlData *data);
int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb);
int venus_fsync(struct super_block *sb, struct CodaFid *fid);
-int venus_statfs(struct super_block *sb, struct kstatfs *sfs);
+int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
/* messages between coda filesystem in kernel and Venus */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5aa95011f7e6..466fbe9e4899 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -72,6 +72,8 @@ struct cpufreq_real_policy {
struct cpufreq_policy {
cpumask_t cpus; /* affected CPUs */
+ unsigned int shared_type; /* ANY or ALL affected CPUs
+ should set cpufreq */
unsigned int cpu; /* cpu nr of registered CPU */
struct cpufreq_cpuinfo cpuinfo;/* see above */
@@ -98,6 +100,8 @@ struct cpufreq_policy {
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
+#define CPUFREQ_SHARED_TYPE_ALL (0) /* All dependent CPUs should set freq */
+#define CPUFREQ_SHARED_TYPE_ANY (1) /* Freq can be set from any dependent CPU */
/******************** cpufreq transition notifiers *******************/
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 46d0e079735d..0dd1610a94a9 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -217,7 +217,6 @@ extern struct dentry * d_alloc_anon(struct inode *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_anon(struct super_block *);
extern int d_invalidate(struct dentry *);
/* only used at mount-time */
diff --git a/include/linux/delay.h b/include/linux/delay.h
index acb74865b973..17ddb55430ae 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -25,10 +25,7 @@ extern unsigned long loops_per_jiffy;
#define MAX_UDELAY_MS 5
#endif
-#ifdef notdef
-#define mdelay(n) (\
- {unsigned long __ms=(n); while (__ms--) udelay(1000);})
-#else
+#ifndef mdelay
#define mdelay(n) (\
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
diff --git a/include/linux/efi.h b/include/linux/efi.h
index e203613d3aec..66d621dbcb6c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -294,6 +294,7 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
+extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
u64 attr);
extern int __init efi_uart_console_only (void);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index cf2abeca92a0..c6310aef5ab0 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -411,6 +411,8 @@ struct ethtool_ops {
#define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */
#define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */
#define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */
+#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */
+#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 73c7d6f04b31..dba4cbd157ee 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -376,7 +376,8 @@ struct address_space_operations {
struct page* (*get_xip_page)(struct address_space *, sector_t,
int);
/* migrate the contents of a page to the specified target */
- int (*migratepage) (struct page *, struct page *);
+ int (*migratepage) (struct address_space *,
+ struct page *, struct page *);
};
struct backing_dev_info;
@@ -682,6 +683,7 @@ extern spinlock_t files_lock;
#define FL_FLOCK 2
#define FL_ACCESS 8 /* not trying to lock, just looking */
#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
#define FL_SLEEP 128 /* A blocking lock */
/*
@@ -774,7 +776,6 @@ extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_l
extern int posix_lock_file(struct file *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
-extern int posix_locks_deadlock(struct file_lock *, struct file_lock *);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags);
extern void lease_get_mtime(struct inode *, struct timespec *time);
@@ -1024,7 +1025,7 @@ struct file_operations {
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
- int (*flush) (struct file *);
+ int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, struct dentry *, int datasync);
int (*aio_fsync) (struct kiocb *, int datasync);
@@ -1096,7 +1097,7 @@ struct super_operations {
int (*sync_fs)(struct super_block *sb, int wait);
void (*write_super_lockfs) (struct super_block *);
void (*unlockfs) (struct super_block *);
- int (*statfs) (struct super_block *, struct kstatfs *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
@@ -1269,23 +1270,26 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
struct file_system_type {
const char *name;
int fs_flags;
- struct super_block *(*get_sb) (struct file_system_type *, int,
- const char *, void *);
+ int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
struct list_head fs_supers;
};
-struct super_block *get_sb_bdev(struct file_system_type *fs_type,
+extern int get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-struct super_block *get_sb_single(struct file_system_type *fs_type,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+extern int get_sb_single(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-struct super_block *get_sb_nodev(struct file_system_type *fs_type,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+extern int get_sb_nodev(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
@@ -1296,8 +1300,10 @@ struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
void *data);
-struct super_block *get_sb_pseudo(struct file_system_type *, char *,
- struct super_operations *ops, unsigned long);
+extern int get_sb_pseudo(struct file_system_type *, char *,
+ struct super_operations *ops, unsigned long,
+ struct vfsmount *mnt);
+extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super(struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
void unnamed_dev_init(void);
@@ -1320,7 +1326,7 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
struct vfsmount *);
-extern int vfs_statfs(struct super_block *, struct kstatfs *);
+extern int vfs_statfs(struct dentry *, struct kstatfs *);
/* /sys/fs */
extern struct subsystem fs_subsys;
@@ -1741,7 +1747,7 @@ extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, void *, filldir_t);
extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int simple_statfs(struct super_block *, struct kstatfs *);
+extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
@@ -1767,7 +1773,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct page *, struct page *);
+extern int buffer_migrate_page(struct address_space *,
+ struct page *, struct page *);
#else
#define buffer_migrate_page NULL
#endif
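
A sketch under the new calling convention (the examplefs_* names are invented): ->get_sb() no longer returns a super_block; it reports its result through the passed vfsmount and returns 0 or a negative errno, usually by tail-calling one of the get_sb_* helpers.

#include <linux/fs.h>
#include <linux/module.h>

static int examplefs_fill_super(struct super_block *sb, void *data, int silent);

static int examplefs_get_sb(struct file_system_type *fs_type, int flags,
			    const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, examplefs_fill_super, mnt);
}

static struct file_system_type examplefs_type = {
	.owner   = THIS_MODULE,
	.name    = "examplefs",
	.get_sb  = examplefs_get_sb,
	.kill_sb = kill_anon_super,
};
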
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 7fd0576a4454..690c42803d2e 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -4,37 +4,32 @@
* Uses for this includes on-device special memory, uncached memory
* etc.
*
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver, adapted for general purpose use.
- *
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-#include <linux/spinlock.h>
-#define ALLOC_MIN_SHIFT 5 /* 32 bytes minimum */
/*
- * Link between free memory chunks of a given size.
+ * General purpose special memory pool descriptor.
*/
-struct gen_pool_link {
- struct gen_pool_link *next;
+struct gen_pool {
+ rwlock_t lock;
+ struct list_head chunks; /* list of chunks in this pool */
+ int min_alloc_order; /* minimum allocation order */
};
/*
- * Memory pool descriptor.
+ * General purpose special memory pool chunk descriptor.
*/
-struct gen_pool {
+struct gen_pool_chunk {
spinlock_t lock;
- unsigned long (*get_new_chunk)(struct gen_pool *);
- struct gen_pool *next;
- struct gen_pool_link *h;
- unsigned long private;
- int max_chunk_shift;
+ struct list_head next_chunk; /* next chunk in pool */
+ unsigned long start_addr; /* starting address of memory chunk */
+ unsigned long end_addr; /* ending address of memory chunk */
+ unsigned long bits[0]; /* bitmap for allocating memory chunk */
};
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size);
-void gen_pool_free(struct gen_pool *mp, unsigned long ptr, int size);
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
- unsigned long (*fp)(struct gen_pool *),
- unsigned long data);
+extern struct gen_pool *gen_pool_create(int, int);
+extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int);
+extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
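
A sketch of the reworked interface (the address and sizes are made up): pools are now created with a minimum allocation order and a NUMA node id, and backing memory is added as explicit chunks.

#include <linux/genalloc.h>

static unsigned long sram_grab(void)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(5, -1);		/* 32-byte granularity, any node */
	if (!pool)
		return 0;
	if (gen_pool_add(pool, 0xfe000000UL, 0x10000, -1))	/* one 64K chunk */
		return 0;

	addr = gen_pool_alloc(pool, 256);	/* carve 256 bytes out of the chunk */
	if (addr)
		gen_pool_free(pool, addr, 256);
	return addr;
}
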
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index df695e9ae327..4513f9e40937 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -188,7 +188,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr);
int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Must be used by hardware driver on module startup/exit */
-int register_hdlc_device(struct net_device *dev);
+#define register_hdlc_device(dev) register_netdev(dev)
void unregister_hdlc_device(struct net_device *dev);
struct net_device *alloc_hdlcdev(void *priv);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 4c5e610fe442..c25a38d8f600 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,8 @@ int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
+int hugetlb_reserve_pages(struct inode *inode, long from, long to);
+void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
extern unsigned long max_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -139,8 +141,6 @@ struct hugetlbfs_sb_info {
struct hugetlbfs_inode_info {
struct shared_policy policy;
- /* Protected by the (global) hugetlb_lock */
- unsigned long prereserved_hpages;
struct inode vfs_inode;
};
@@ -157,10 +157,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_zero_setup(size_t);
-int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atleast_hpages);
-void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atmost_hpages);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9e0fefd7884a..70741e170114 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -7,32 +7,13 @@
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
-/*
- * For 2.4.x compatibility, 2.4.x can use
- *
- * typedef void irqreturn_t;
- * #define IRQ_NONE
- * #define IRQ_HANDLED
- * #define IRQ_RETVAL(x)
- *
- * To mix old-style and new-style irq handler returns.
- *
- * IRQ_NONE means we didn't handle it.
- * IRQ_HANDLED means that we did have a valid interrupt and handled it.
- * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled)
- */
-typedef int irqreturn_t;
-
-#define IRQ_NONE (0)
-#define IRQ_HANDLED (1)
-#define IRQ_RETVAL(x) ((x) != 0)
-
struct irqaction {
irqreturn_t (*handler)(int, void *, struct pt_regs *);
unsigned long flags;
diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h
index 3dd18b785ebd..de73a3289cc2 100644
--- a/include/linux/ioc4.h
+++ b/include/linux/ioc4.h
@@ -147,6 +147,10 @@ struct ioc4_misc_regs {
#define IOC4_GPCR_EDGE_6 0x40
#define IOC4_GPCR_EDGE_7 0x80
+#define IOC4_VARIANT_IO9 0x0900
+#define IOC4_VARIANT_PCI_RT 0x0901
+#define IOC4_VARIANT_IO10 0x1000
+
/* One of these per IOC4 */
struct ioc4_driver_data {
struct list_head idd_list;
@@ -156,6 +160,7 @@ struct ioc4_driver_data {
struct __iomem ioc4_misc_regs *idd_misc_regs;
unsigned long count_period;
void *idd_serial_data;
+ unsigned int idd_variant;
};
/* One per submodule */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 42c9cd562860..676e00dfb21a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -17,6 +17,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
@@ -164,10 +165,18 @@ static inline void set_irq_info(int irq, cpumask_t mask)
#endif // CONFIG_SMP
+#ifdef CONFIG_IRQBALANCE
+extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
+#else
+static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
+{
+}
+#endif
+
extern int no_irq_affinity;
extern int noirqdebug_setup(char *str);
-extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+extern fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
struct irqaction *action);
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
new file mode 100644
index 000000000000..881883c2009d
--- /dev/null
+++ b/include/linux/irqreturn.h
@@ -0,0 +1,25 @@
+/* irqreturn.h */
+#ifndef _LINUX_IRQRETURN_H
+#define _LINUX_IRQRETURN_H
+
+/*
+ * For 2.4.x compatibility, 2.4.x can use
+ *
+ * typedef void irqreturn_t;
+ * #define IRQ_NONE
+ * #define IRQ_HANDLED
+ * #define IRQ_RETVAL(x)
+ *
+ * To mix old-style and new-style irq handler returns.
+ *
+ * IRQ_NONE means we didn't handle it.
+ * IRQ_HANDLED means that we did have a valid interrupt and handled it.
+ * IRQ_RETVAL(x) selects between the two depending on whether x is non-zero (non-zero means handled)
+ */
+typedef int irqreturn_t;
+
+#define IRQ_NONE (0)
+#define IRQ_HANDLED (1)
+#define IRQ_RETVAL(x) ((x) != 0)
+
+#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 6a425e370cb3..20eb34403d0c 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -501,6 +501,12 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
+ * Doubly-linked circular list of all buffers submitted for IO while
+ * checkpointing. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_io_list;
+
+ /*
* Doubly-linked circular list of temporary buffers currently undergoing
* IO in the log [j_list_lock]
*/
@@ -849,7 +855,7 @@ extern void journal_commit_transaction(journal_t *);
/* Checkpoint list management */
int __journal_clean_checkpoint_list(journal_t *journal);
-void __journal_remove_checkpoint(struct journal_head *);
+int __journal_remove_checkpoint(struct journal_head *);
void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f4fc576ed4c4..8c21aaa248b4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,6 +24,9 @@ extern const char linux_banner[];
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
+#define LLONG_MAX ((long long)(~0ULL>>1))
+#define LLONG_MIN (-LLONG_MAX - 1)
+#define ULLONG_MAX (~0ULL)
#define STACK_MAGIC 0xdeadbeef
@@ -75,7 +78,7 @@ extern int cond_resched(void);
# define might_sleep() do { might_resched(); } while (0)
#endif
-#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
#define abs(x) ({ \
int __x = (x); \
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index cfb3410e32b1..6427949ddf99 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -106,6 +106,7 @@ extern struct page *kimage_alloc_control_pages(struct kimage *image,
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
extern struct kimage *kexec_image;
+extern struct kimage *kexec_crash_image;
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_ARCH_MASK 0xffff0000
diff --git a/include/linux/list.h b/include/linux/list.h
index 76f05718342c..a02642e4710a 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -197,12 +197,35 @@ static inline void list_del_rcu(struct list_head *entry)
entry->prev = LIST_POISON2;
}
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ * Note: if 'old' was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+ struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
/*
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The old entry will be replaced with the new entry atomically.
+ * Note: 'old' should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
struct list_head *new)
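
A sketch of the pattern list_replace_init() enables: detach a whole queue under a lock, leaving the original head empty, then walk the detached entries without the lock held.

#include <linux/list.h>
#include <linux/spinlock.h>

static void drain_queue(struct list_head *queue, spinlock_t *lock)
{
	LIST_HEAD(local);

	spin_lock(lock);
	if (!list_empty(queue))
		list_replace_init(queue, &local);	/* queue is reinitialised empty */
	spin_unlock(lock);

	/* entries now hang off 'local' and can be processed lock-free */
}
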
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 6789c4940c9c..5dba23a1c0d0 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -3,17 +3,17 @@
#include <linux/mm.h>
+typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+
#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
-extern void migrate_page_copy(struct page *, struct page *);
-extern int migrate_page_remove_references(struct page *, struct page *, int);
-extern int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed);
-extern int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest);
-extern int fail_migrate_page(struct page *, struct page *);
+extern int migrate_page(struct address_space *,
+ struct page *, struct page *);
+extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+
+extern int fail_migrate_page(struct address_space *,
+ struct page *, struct page *);
extern int migrate_prep(void);
@@ -22,8 +22,8 @@ extern int migrate_prep(void);
static inline int isolate_lru_page(struct page *p, struct list_head *list)
{ return -ENOSYS; }
static inline int putback_lru_pages(struct list_head *l) { return 0; }
-static inline int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+static inline int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private) { return -ENOSYS; }
static inline int migrate_pages_to(struct list_head *pagelist,
struct vm_area_struct *vma, int dest) { return 0; }
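
A sketch of the new callback style (new_node_page() is an invented name): instead of a pre-built list of target pages, migrate_pages() now asks a new_page_t callback for each destination page.

#include <linux/gfp.h>
#include <linux/migrate.h>

static struct page *new_node_page(struct page *page, unsigned long node, int **result)
{
	return alloc_pages_node((int)node, GFP_HIGHUSER, 0);	/* order-0 page on 'node' */
}

/* then, for a list of isolated pages: migrate_pages(&pagelist, new_node_page, nid); */
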
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e2fa375e478e..3b09444121d9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -199,6 +199,10 @@ struct vm_operations_struct {
void (*close)(struct vm_area_struct * area);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+
+ /* notification that a previously read-only page is about to become
+ * writable; if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_NUMA
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
@@ -465,10 +469,13 @@ static inline unsigned long page_zonenum(struct page *page)
struct zone;
extern struct zone *zone_table[];
+static inline int page_zone_id(struct page *page)
+{
+ return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
+}
static inline struct zone *page_zone(struct page *page)
{
- return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
- ZONETABLE_MASK];
+ return zone_table[page_zone_id(page)];
}
static inline unsigned long page_to_nid(struct page *page)
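
A sketch of how a driver or filesystem would hook the new callback (the example_* names are invented); returning a negative value from ->page_mkwrite() makes the faulting task receive SIGBUS.

#include <linux/mm.h>

static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/* e.g. reserve backing store or start dirty tracking before the write */
	return 0;
}

static struct vm_operations_struct example_vm_ops = {
	.nopage		= filemap_nopage,
	.page_mkwrite	= example_page_mkwrite,
};
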
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9742e3c16222..d6120fa69116 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -197,7 +197,7 @@ struct zone {
/*
* wait_table -- the array holding the hash table
- * wait_table_size -- the size of the hash table array
+ * wait_table_hash_nr_entries -- the size of the hash table array
* wait_table_bits -- wait_table_size == (1 << wait_table_bits)
*
* The purpose of all these is to keep track of the people
@@ -220,7 +220,7 @@ struct zone {
* free_area_init_core() performs the initialization of them.
*/
wait_queue_head_t * wait_table;
- unsigned long wait_table_size;
+ unsigned long wait_table_hash_nr_entries;
unsigned long wait_table_bits;
/*
@@ -333,6 +333,9 @@ void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags);
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long size);
+
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
@@ -506,6 +509,10 @@ struct mem_section {
* pages. However, it is stored with some other magic.
* (see sparse.c::sparse_init_one_section())
*
+ * Additionally, during early boot we encode the node id of
+ * the section's location here to guide allocation.
+ * (see sparse.c::memory_present())
+ *
* Making it a UL at least makes someone do a cast
* before using it wrong.
*/
@@ -545,6 +552,7 @@ extern int __section_nr(struct mem_section* ms);
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_MAP_LAST_BIT (1UL<<2)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT 2
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
diff --git a/include/linux/module.h b/include/linux/module.h
index c2d89e037af0..2d366098eab5 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -105,6 +105,8 @@ extern struct module __this_module;
* "GPL and additional rights" [GNU Public License v2 rights and more]
* "Dual BSD/GPL" [GNU Public License v2
* or BSD license choice]
+ * "Dual MIT/GPL" [GNU Public License v2
+ * or MIT license choice]
* "Dual MPL/GPL" [GNU Public License v2
* or Mozilla license choice]
*
diff --git a/include/linux/mount.h b/include/linux/mount.h
index b7472ae91fa4..60718f12caa9 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -17,6 +17,11 @@
#include <linux/spinlock.h>
#include <asm/atomic.h>
+struct super_block;
+struct vfsmount;
+struct dentry;
+struct namespace;
+
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
#define MNT_NOEXEC 0x04
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cead6be467ed..bc747e5d7138 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -308,9 +308,13 @@ struct net_device
#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
-#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
+#define NETIF_F_GSO 2048 /* Enable software GSO. */
#define NETIF_F_LLTX 4096 /* LockLess TX */
-#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/
+
+ /* Segmentation offload features */
+#define NETIF_F_GSO_SHIFT 16
+#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
@@ -402,6 +406,9 @@ struct net_device
struct list_head qdisc_list;
unsigned long tx_queue_len; /* Max frames per queue allowed */
+ /* Partially transmitted GSO packet. */
+ struct sk_buff *gso_skb;
+
/* ingress path synchronizer */
spinlock_t ingress_lock;
struct Qdisc *qdisc_ingress;
@@ -536,6 +543,7 @@ struct packet_type {
struct net_device *,
struct packet_type *,
struct net_device *);
+ struct sk_buff *(*gso_segment)(struct sk_buff *skb, int sg);
void *af_packet_priv;
struct list_head list;
};
@@ -686,7 +694,8 @@ extern int dev_change_name(struct net_device *, char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
-extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
+extern int dev_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
extern void dev_init(void);
@@ -960,6 +969,7 @@ extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb, int inward);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
@@ -979,6 +989,13 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
extern void linkwatch_run_queue(void);
+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+{
+ int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
+ return skb_shinfo(skb)->gso_size &&
+ (dev->features & feature) != feature;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_DEV_H */
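
A heavily simplified sketch of how the new pieces combine on transmit (the real logic lives in net/core/dev.c; xmit_one() is an invented name): when the device lacks the required offload feature, the packet is segmented in software first.

static int xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	if (netif_needs_gso(dev, skb)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, dev->features & NETIF_F_SG);
		if (unlikely(IS_ERR(segs)))
			return PTR_ERR(segs);
		skb = segs;	/* the driver then sees the segment chain instead */
	}
	return dev->hard_start_xmit(skb, dev);
}
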
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d276a4e2f825..0c076d58c676 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -7,6 +7,8 @@
#include <linux/percpu.h>
#include <linux/cache.h>
+#include <linux/types.h>
+
#include <asm/pgtable.h>
/*
@@ -88,7 +90,17 @@
#define PG_nosave_free 18 /* Free, should not be written */
#define PG_buddy 19 /* Page is free, on buddy lists */
-#define PG_uncached 20 /* Page has been mapped as uncached */
+
+#if (BITS_PER_LONG > 32)
+/*
+ * 64-bit-only flags build down from bit 31
+ *
+ * 32 bit -------------------------------| FIELDS | FLAGS |
+ * 64 bit | FIELDS | ?????? FLAGS |
+ * 63 32 0
+ */
+#define PG_uncached 31 /* Page has been mapped as uncached */
+#endif
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7a1af574dedf..1245df7141aa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -99,6 +99,13 @@ extern struct page * read_cache_page(struct address_space *mapping,
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
+static inline struct page *read_mapping_page(struct address_space *mapping,
+ unsigned long index, void *data)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page(mapping, index, filler, data);
+}
+
int add_to_page_cache(struct page *page, struct address_space *mapping,
unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
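
A sketch of the convenience this adds (example_get_page() is an invented name): callers no longer open-code the readpage filler cast when pulling a page through the page cache.

#include <linux/pagemap.h>

static struct page *example_get_page(struct inode *dir, unsigned long n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	if (!IS_ERR(page))
		wait_on_page_locked(page);	/* callers still check PageUptodate() */
	return page;
}
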
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 66b5de404f22..f5aa593ccf32 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -10,13 +10,14 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
+#include <linux/types.h>
#ifdef CONFIG_SMP
struct percpu_counter {
spinlock_t lock;
- long count;
- long *counters;
+ s64 count;
+ s32 *counters;
};
#if NR_CPUS >= 16
@@ -25,11 +26,11 @@ struct percpu_counter {
#define FBC_BATCH (NR_CPUS*4)
#endif
-static inline void percpu_counter_init(struct percpu_counter *fbc)
+static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
spin_lock_init(&fbc->lock);
- fbc->count = 0;
- fbc->counters = alloc_percpu(long);
+ fbc->count = amount;
+ fbc->counters = alloc_percpu(s32);
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -37,10 +38,10 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
free_percpu(fbc->counters);
}
-void percpu_counter_mod(struct percpu_counter *fbc, long amount);
-long percpu_counter_sum(struct percpu_counter *fbc);
+void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
+s64 percpu_counter_sum(struct percpu_counter *fbc);
-static inline long percpu_counter_read(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
}
@@ -48,13 +49,14 @@ static inline long percpu_counter_read(struct percpu_counter *fbc)
/*
* It is possible for the percpu_counter_read() to return a small negative
* number for some counter which should never be negative.
+ *
*/
-static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
- long ret = fbc->count;
+ s64 ret = fbc->count;
barrier(); /* Prevent reloads of fbc->count */
- if (ret > 0)
+ if (ret >= 0)
return ret;
return 1;
}
@@ -62,12 +64,12 @@ static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
#else
struct percpu_counter {
- long count;
+ s64 count;
};
-static inline void percpu_counter_init(struct percpu_counter *fbc)
+static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
- fbc->count = 0;
+ fbc->count = amount;
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -75,24 +77,24 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
}
static inline void
-percpu_counter_mod(struct percpu_counter *fbc, long amount)
+percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
preempt_disable();
fbc->count += amount;
preempt_enable();
}
-static inline long percpu_counter_read(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
}
-static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
return fbc->count;
}
-static inline long percpu_counter_sum(struct percpu_counter *fbc)
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
return percpu_counter_read_positive(fbc);
}
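
A sketch of the updated usage (nr_widgets is an invented counter): besides the move to s64, percpu_counter_init() now takes the initial value, so callers no longer init to zero and then add.

#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

static void widgets_init(s64 start)
{
	percpu_counter_init(&nr_widgets, start);	/* second argument is new */
}

static void widget_created(void)
{
	percpu_counter_mod(&nr_widgets, 1);
}

static s64 widgets_approx(void)
{
	return percpu_counter_read_positive(&nr_widgets);
}
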
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index bf022c43a18e..52a9be41250d 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -52,4 +52,11 @@
#define PR_SET_NAME 15 /* Set process name */
#define PR_GET_NAME 16 /* Get process name */
+/* Get/set process endian */
+#define PR_GET_ENDIAN 19
+#define PR_SET_ENDIAN 20
+# define PR_ENDIAN_BIG 0
+# define PR_ENDIAN_LITTLE 1 /* True little endian mode */
+# define PR_ENDIAN_PPC_LITTLE 2 /* "PowerPC" pseudo little endian */
+
#endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 0d36750fc0f1..ee918bc6e18c 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -51,6 +51,10 @@
#ifdef __KERNEL__
/*
* Ptrace flags
+ *
+ * The ownership rules for task->ptrace, which holds the ptrace
+ * flags, are simple: while a task is running it owns its own
+ * task->ptrace flags; when a task is stopped, the ptracer owns task->ptrace.
*/
#define PT_PTRACED 0x00000001
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index dd83cca28001..9158a68140c9 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -23,6 +23,9 @@
#include <linux/preempt.h>
#include <linux/types.h>
+#define RADIX_TREE_MAX_TAGS 2
+
+/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
unsigned int height;
gfp_t gfp_mask;
@@ -45,8 +48,6 @@ do { \
(root)->rnode = NULL; \
} while (0)
-#define RADIX_TREE_MAX_TAGS 2
-
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 78ecfa28b1c2..00b340ba6612 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -2,8 +2,8 @@
#define _LINUX_RAMFS_H
struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev);
-struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data);
+extern int ramfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt);
#ifndef CONFIG_MMU
extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index f37006f21664..8d5382e62c08 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -132,6 +132,10 @@ static inline void rb_set_color(struct rb_node *rb, int color)
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#define RB_EMPTY_NODE(node) (rb_parent(node) != node)
+#define RB_CLEAR_NODE(node) (rb_set_parent(node, node))
+
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
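
A small sketch of how the added rbtree helpers are meant to be used; struct item and its tree are invented for illustration (includes omitted):

struct item {
	struct rb_node node;
	unsigned long key;
};

static struct rb_root item_tree = RB_ROOT;

static struct item *item_alloc(unsigned long key)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (it) {
		it->key = key;
		RB_CLEAR_NODE(&it->node);	/* parent pointer set to the node itself */
	}
	return it;
}

static int tree_is_empty(void)
{
	return RB_EMPTY_ROOT(&item_tree);
}
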
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 970284f571a6..6312758393b6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -246,7 +246,7 @@ extern int rcu_needs_cpu(int cpu);
* softirq handlers will have completed, since in some kernels, these
* handlers can run in process context, and can block.
*
- * This primitive provides the guarantees made by the (deprecated)
+ * This primitive provides the guarantees made by the (now removed)
* synchronize_kernel() API. In contrast, synchronize_rcu() only
* guarantees that rcu_read_lock() sections will have completed.
* In "classic RCU", these two guarantees happen to be one and
@@ -264,7 +264,6 @@ extern void FASTCALL(call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head)));
-extern __deprecated_for_modules void synchronize_kernel(void);
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 2d4c81a220db..bf97b0900014 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,7 +91,6 @@ static inline void page_dup_rmap(struct page *page)
*/
int page_referenced(struct page *, int is_locked);
int try_to_unmap(struct page *, int ignore_refs);
-void remove_from_swap(struct page *page);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 267f15257040..38b4791e6a5d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -941,12 +941,11 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_KSWAPD 0x00040000 /* I am kswapd */
#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
-#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
-#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
-#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
-#define PF_SWAPWRITE 0x01000000 /* Allowed to write to swap */
-#define PF_SPREAD_PAGE 0x04000000 /* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB 0x08000000 /* Spread some slab caches over cpuset */
+#define PF_BORROWED_MM 0x00200000 /* I am a kthread doing use_mm */
+#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
+#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
/*
@@ -1225,7 +1224,7 @@ static inline int thread_group_empty(task_t *p)
(thread_group_leader(p) && !thread_group_empty(p))
/*
- * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
+ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset.
*
diff --git a/include/linux/security.h b/include/linux/security.h
index 47722d355532..d2c17bd91a29 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -171,9 +171,9 @@ struct swap_info_struct;
* Deallocate and clear the sb->s_security field.
* @sb contains the super_block structure to be modified.
* @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @sb
- * filesystem.
- * @sb contains the super_block structure for the filesystem.
+ * Check permission before obtaining filesystem statistics for the @mnt
+ * mountpoint.
+ * @dentry is a handle on the superblock for the filesystem.
* Return 0 if permission is granted.
* @sb_mount:
* Check permission before an object specified by @dev_name is mounted on
@@ -577,6 +577,11 @@ struct swap_info_struct;
* @p contains the task_struct of process.
* @nice contains the new nice value.
* Return 0 if permission is granted.
+ * @task_setioprio:
+ * Check permission before setting the ioprio value of @p to @ioprio.
+ * @p contains the task_struct of process.
+ * @ioprio contains the new ioprio value.
+ * Return 0 if permission is granted.
* @task_setrlimit:
* Check permission before setting the resource limits of the current
* process for @resource to @new_rlim. The old resource limit values can
@@ -596,6 +601,10 @@ struct swap_info_struct;
* @p.
* @p contains the task_struct for process.
* Return 0 if permission is granted.
+ * @task_movememory:
+ * Check permission before moving memory owned by process @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
* @task_kill:
* Check permission before sending signal @sig to @p. @info can be NULL,
* the constant 1, or a pointer to a siginfo structure. If @info is 1 or
@@ -1127,7 +1136,7 @@ struct security_operations {
int (*sb_copy_data)(struct file_system_type *type,
void *orig, void *copy);
int (*sb_kern_mount) (struct super_block *sb, void *data);
- int (*sb_statfs) (struct super_block * sb);
+ int (*sb_statfs) (struct dentry *dentry);
int (*sb_mount) (char *dev_name, struct nameidata * nd,
char *type, unsigned long flags, void *data);
int (*sb_check_sb) (struct vfsmount * mnt, struct nameidata * nd);
@@ -1210,10 +1219,12 @@ struct security_operations {
int (*task_getsid) (struct task_struct * p);
int (*task_setgroups) (struct group_info *group_info);
int (*task_setnice) (struct task_struct * p, int nice);
+ int (*task_setioprio) (struct task_struct * p, int ioprio);
int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
int (*task_setscheduler) (struct task_struct * p, int policy,
struct sched_param * lp);
int (*task_getscheduler) (struct task_struct * p);
+ int (*task_movememory) (struct task_struct * p);
int (*task_kill) (struct task_struct * p,
struct siginfo * info, int sig);
int (*task_wait) (struct task_struct * p);
@@ -1450,9 +1461,9 @@ static inline int security_sb_kern_mount (struct super_block *sb, void *data)
return security_ops->sb_kern_mount (sb, data);
}
-static inline int security_sb_statfs (struct super_block *sb)
+static inline int security_sb_statfs (struct dentry *dentry)
{
- return security_ops->sb_statfs (sb);
+ return security_ops->sb_statfs (dentry);
}
static inline int security_sb_mount (char *dev_name, struct nameidata *nd,
@@ -1836,6 +1847,11 @@ static inline int security_task_setnice (struct task_struct *p, int nice)
return security_ops->task_setnice (p, nice);
}
+static inline int security_task_setioprio (struct task_struct *p, int ioprio)
+{
+ return security_ops->task_setioprio (p, ioprio);
+}
+
static inline int security_task_setrlimit (unsigned int resource,
struct rlimit *new_rlim)
{
@@ -1854,6 +1870,11 @@ static inline int security_task_getscheduler (struct task_struct *p)
return security_ops->task_getscheduler (p);
}
+static inline int security_task_movememory (struct task_struct *p)
+{
+ return security_ops->task_movememory (p);
+}
+
static inline int security_task_kill (struct task_struct *p,
struct siginfo *info, int sig)
{
@@ -2162,7 +2183,7 @@ static inline int security_sb_kern_mount (struct super_block *sb, void *data)
return 0;
}
-static inline int security_sb_statfs (struct super_block *sb)
+static inline int security_sb_statfs (struct dentry *dentry)
{
return 0;
}
@@ -2478,6 +2499,11 @@ static inline int security_task_setnice (struct task_struct *p, int nice)
return 0;
}
+static inline int security_task_setioprio (struct task_struct *p, int ioprio)
+{
+ return 0;
+}
+
static inline int security_task_setrlimit (unsigned int resource,
struct rlimit *new_rlim)
{
@@ -2496,6 +2522,11 @@ static inline int security_task_getscheduler (struct task_struct *p)
return 0;
}
+static inline int security_task_movememory (struct task_struct *p)
+{
+ return 0;
+}
+
static inline int security_task_kill (struct task_struct *p,
struct siginfo *info, int sig)
{
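
A hedged sketch of what a security module would supply for the two new hooks; example_ops is illustrative and not part of any in-tree module:

static int example_task_setioprio(struct task_struct *p, int ioprio)
{
	/* A real policy would check the caller's rights over p here. */
	return 0;
}

static int example_task_movememory(struct task_struct *p)
{
	/* Permit page migration of p's memory. */
	return 0;
}

static struct security_operations example_ops = {
	.task_setioprio		= example_task_setioprio,
	.task_movememory	= example_task_movememory,
};
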
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 66f8819f9568..16eef03ce0eb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -134,9 +134,10 @@ struct skb_frag_struct {
struct skb_shared_info {
atomic_t dataref;
unsigned short nr_frags;
- unsigned short tso_size;
- unsigned short tso_segs;
- unsigned short ufo_size;
+ unsigned short gso_size;
+ /* Warning: this field is not always filled in (UFO)! */
+ unsigned short gso_segs;
+ unsigned short gso_type;
unsigned int ip6_frag_id;
struct sk_buff *frag_list;
skb_frag_t frags[MAX_SKB_FRAGS];
@@ -168,6 +169,11 @@ enum {
SKB_FCLONE_CLONE,
};
+enum {
+ SKB_GSO_TCPV4 = 1 << 0,
+ SKB_GSO_UDPV4 = 1 << 1,
+};
+
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
@@ -209,6 +215,8 @@ enum {
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @tc_index: Traffic control index
* @tc_verd: traffic control verdict
+ * @dma_cookie: a cookie to one of several possible DMA operations
+ * done by skb DMA functions
* @secmark: security marking
*/
@@ -345,7 +353,7 @@ extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
gfp_t priority);
-extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
+extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) kfree_skb(a)
extern void skb_over_panic(struct sk_buff *skb, int len,
void *here);
@@ -1122,16 +1130,15 @@ static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
- * is untouched. Returns the buffer, which may be a replacement
- * for the original, or NULL for out of memory - in which case
- * the original buffer is still freed.
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
*/
-static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
+static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
if (likely(size >= len))
- return skb;
+ return 0;
return skb_pad(skb, len-size);
}
@@ -1292,6 +1299,7 @@ extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern void skb_release_data(struct sk_buff *skb);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, int sg);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
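
Because skb_padto() now returns an int and frees the skb on failure, callers switch from checking a returned pointer to checking an error code; a hedged transmit-path sketch (the driver context is assumed, not taken from this patch):

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Previously: skb = skb_padto(skb, ETH_ZLEN); if (!skb) return 0;
	 * Now the helper consumes the skb itself when padding fails. */
	if (skb_padto(skb, ETH_ZLEN))
		return 0;	/* skb already freed, count it as dropped */

	/* ... hand the (possibly padded) skb to the hardware queue ... */
	return 0;
}
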
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9dc93163e065..45ad55b70d1c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -86,6 +86,51 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
__kmalloc_track_caller(size, flags, __builtin_return_address(0))
#endif
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep.
+ * For example, use this inside interrupt handlers.
+ *
+ * %GFP_HIGHUSER - Allocate pages from high memory.
+ *
+ * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ *
+ * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ *
+ * Also it is possible to set different flags by OR'ing
+ * in one or more of the following additional @flags:
+ *
+ * %__GFP_COLD - Request cache-cold pages instead of
+ * trying to return cache-warm pages.
+ *
+ * %__GFP_DMA - Request memory from the DMA-capable zone.
+ *
+ * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ *
+ * %__GFP_HIGHMEM - Allocated memory may be from highmem.
+ *
+ * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
+ * (think twice before using).
+ *
+ * %__GFP_NORETRY - If memory is not immediately available,
+ * then give up at once.
+ *
+ * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ *
+ * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ */
static inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
@@ -111,6 +156,11 @@ found:
extern void *__kzalloc(size_t, gfp_t);
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
static inline void *kzalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
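
A short sketch pairing the documented flags with typical call sites; struct foo is invented for illustration:

struct foo {
	int id;
	char name[16];
};

static struct foo *alloc_foo(void)
{
	/* Process context: GFP_KERNEL may sleep to reclaim memory. */
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

static struct foo *alloc_foo_atomic(void)
{
	/* Interrupt context: GFP_ATOMIC never sleeps but fails more readily. */
	return kmalloc(sizeof(struct foo), GFP_ATOMIC | __GFP_NOWARN);
}
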
diff --git a/include/linux/string.h b/include/linux/string.h
index c61306da8c52..e4c755860316 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -56,6 +56,7 @@ extern char * strnchr(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
+extern char * strstrip(char *);
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
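
strstrip() removes leading and trailing whitespace in place and returns a pointer into the (writable) buffer; a minimal sketch:

static void handle_input(char *buf)	/* buf must be modifiable, not a string literal */
{
	char *s = strstrip(buf);

	if (*s)
		printk(KERN_DEBUG "token: '%s'\n", s);
}
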
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 96e31aa64cc7..e82cb10fb3ea 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -71,6 +71,7 @@ struct saved_context;
void __save_processor_state(struct saved_context *ctxt);
void __restore_processor_state(struct saved_context *ctxt);
unsigned long get_safe_page(gfp_t gfp_mask);
+int swsusp_add_arch_pages(unsigned long start, unsigned long end);
/*
* XXX: We try to keep some more pages free so that I/O operations succeed
diff --git a/include/linux/swap.h b/include/linux/swap.h
index aca9bfae208f..dc3f3aa0c83e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -28,7 +28,14 @@ static inline int current_is_kswapd(void)
* the type/offset into the pte as 5/27 as well.
*/
#define MAX_SWAPFILES_SHIFT 5
+#ifndef CONFIG_MIGRATION
#define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT)
+#else
+/* Use last two entries for page migration swap entries */
+#define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2)
+#define SWP_MIGRATION_READ MAX_SWAPFILES
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1)
+#endif
/*
* Magic header for a swap area. The first part of the union is
@@ -48,12 +55,14 @@ union swap_header {
char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */
} magic;
struct {
- char bootbits[1024]; /* Space for disklabel etc. */
- unsigned int version;
- unsigned int last_page;
- unsigned int nr_badpages;
- unsigned int padding[125];
- unsigned int badpages[1];
+ char bootbits[1024]; /* Space for disklabel etc. */
+ __u32 version;
+ __u32 last_page;
+ __u32 nr_badpages;
+ unsigned char sws_uuid[16];
+ unsigned char sws_volume[16];
+ __u32 padding[117];
+ __u32 badpages[1];
} info;
};
@@ -176,20 +185,7 @@ extern unsigned long try_to_free_pages(struct zone **, gfp_t);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
-
-/* possible outcome of pageout() */
-typedef enum {
- /* failed to write page out, page is locked */
- PAGE_KEEP,
- /* move page to the active list, page is locked */
- PAGE_ACTIVATE,
- /* page has been sent to the disk successfully, page is unlocked */
- PAGE_SUCCESS,
- /* page is clean and locked */
- PAGE_CLEAN,
-} pageout_t;
-
-extern pageout_t pageout(struct page *page, struct address_space *mapping);
+extern long vm_total_pages;
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
@@ -250,7 +246,6 @@ extern int remove_exclusive_swap_page(struct page *);
struct backing_dev_info;
extern spinlock_t swap_lock;
-extern int remove_vma_swap(struct vm_area_struct *vma, struct page *page);
/* linux/mm/thrash.c */
extern struct mm_struct * swap_token_mm;
@@ -288,18 +283,60 @@ static inline void disable_swap_token(void)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr), 0);
-#define show_swap_cache_info() /*NOTHING*/
-#define free_swap_and_cache(swp) /*NOTHING*/
-#define swap_duplicate(swp) /*NOTHING*/
-#define swap_free(swp) /*NOTHING*/
-#define read_swap_cache_async(swp,vma,addr) NULL
-#define lookup_swap_cache(swp) NULL
-#define valid_swaphandles(swp, off) 0
+static inline void show_swap_cache_info(void)
+{
+}
+
+static inline void free_swap_and_cache(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
+}
+
+static inline void swap_free(swp_entry_t swp)
+{
+}
+
+static inline struct page *read_swap_cache_async(swp_entry_t swp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return NULL;
+}
+
+static inline struct page *lookup_swap_cache(swp_entry_t swp)
+{
+ return NULL;
+}
+
+static inline int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
+{
+ return 0;
+}
+
#define can_share_swap_page(p) (page_mapcount(p) == 1)
-#define move_to_swap_cache(p, swp) 1
-#define move_from_swap_cache(p, i, m) 1
-#define __delete_from_swap_cache(p) /*NOTHING*/
-#define delete_from_swap_cache(p) /*NOTHING*/
+
+static inline int move_to_swap_cache(struct page *page, swp_entry_t entry)
+{
+ return 1;
+}
+
+static inline int move_from_swap_cache(struct page *page, unsigned long index,
+ struct address_space *mapping)
+{
+ return 1;
+}
+
+static inline void __delete_from_swap_cache(struct page *page)
+{
+}
+
+static inline void delete_from_swap_cache(struct page *page)
+{
+}
+
#define swap_token_default_timeout 0
static inline int remove_exclusive_swap_page(struct page *p)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 87b9d14c710d..ec639aa3a1d3 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,3 +67,56 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
return __swp_entry_to_pte(arch_entry);
}
+
+#ifdef CONFIG_MIGRATION
+static inline swp_entry_t make_migration_entry(struct page *page, int write)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
+ page_to_pfn(page));
+}
+
+static inline int is_migration_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
+ swp_type(entry) == SWP_MIGRATION_WRITE);
+}
+
+static inline int is_write_migration_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
+}
+
+static inline struct page *migration_entry_to_page(swp_entry_t entry)
+{
+ struct page *p = pfn_to_page(swp_offset(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ BUG_ON(!PageLocked(p));
+ return p;
+}
+
+static inline void make_migration_entry_read(swp_entry_t *entry)
+{
+ *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+}
+
+extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address);
+#else
+
+#define make_migration_entry(page, write) swp_entry(0, 0)
+#define is_migration_entry(swp) 0
+#define migration_entry_to_page(swp) NULL
+static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address) { }
+static inline int is_write_migration_entry(swp_entry_t entry)
+{
+ return 0;
+}
+
+#endif
+
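
A hedged sketch of how the migration entries above are intended to be used; it glosses over page-table locking and is not code from this patch:

/* While the page is locked, replace a mapping with a migration entry. */
static void install_migration_entry(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    struct page *page, int write)
{
	swp_entry_t entry = make_migration_entry(page, write);

	set_pte_at(vma->vm_mm, addr, ptep, swp_entry_to_pte(entry));
}

/* A fault that finds such an entry waits for the migration to complete. */
static void wait_if_migrating(struct mm_struct *mm, pmd_t *pmd,
			      unsigned long addr, swp_entry_t entry)
{
	if (is_migration_entry(entry))
		migration_entry_wait(mm, pmd, addr);
}
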
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bd67a4413df7..33785b79d548 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -516,6 +516,16 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
const unsigned long __user *from,
const unsigned long __user *to);
+asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+ const void __user * __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
+asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
+ __u32 __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
asmlinkage long sys_mbind(unsigned long start, unsigned long len,
unsigned long mode,
unsigned long __user *nmask,
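
A hedged userspace sketch of the new move_pages interface, called through syscall(2) since no libc wrapper existed at the time; __NR_move_pages and the MPOL_MF_MOVE value are assumptions mirrored from the kernel headers:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#define MPOL_MF_MOVE	(1 << 1)	/* mirrors include/linux/mempolicy.h */

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *buf = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *pages[1] = { buf };
	int nodes[1] = { 1 };		/* target node, assumed to exist */
	int status[1] = { -1 };

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0, psz);		/* fault the page in first */
#ifdef __NR_move_pages
	if (syscall(__NR_move_pages, 0, 1UL, pages, nodes, status,
		    MPOL_MF_MOVE) == 0)
		printf("page is now on node %d\n", status[0]);
	else
		perror("move_pages");
#endif
	return 0;
}
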
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index cee944dbdcd4..c7132029af0f 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -186,6 +186,7 @@ enum
VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
+ VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
};
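
The new knob is exposed as /proc/sys/vm/panic_on_oom; a trivial sketch of enabling it from userspace (root privileges assumed):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/panic_on_oom", "w");

	if (!f) {
		perror("panic_on_oom");
		return 1;
	}
	fputs("1\n", f);	/* 1 = panic instead of killing a task on OOM */
	fclose(f);
	return 0;
}
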
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 420a689c3fb4..8ebf497907f8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -18,7 +18,6 @@
#define _LINUX_TCP_H
#include <linux/types.h>
-#include <linux/dmaengine.h>
#include <asm/byteorder.h>
struct tcphdr {
@@ -161,6 +160,7 @@ struct tcp_info
#ifdef __KERNEL__
#include <linux/skbuff.h>
+#include <linux/dmaengine.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
new file mode 100644
index 000000000000..391e7ed1eb3f
--- /dev/null
+++ b/include/linux/uaccess.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_UACCESS_H__
+#define __LINUX_UACCESS_H__
+
+#include <asm/uaccess.h>
+
+#ifndef ARCH_HAS_NOCACHE_UACCESS
+
+static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ return __copy_from_user_inatomic(to, from, n);
+}
+
+static inline unsigned long __copy_from_user_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ return __copy_from_user(to, from, n);
+}
+
+#endif /* ARCH_HAS_NOCACHE_UACCESS */
+
+#endif /* __LINUX_UACCESS_H__ */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1d5577b2b752..f6024ab4eff0 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,10 +4,13 @@
#include <linux/spinlock.h>
#include <asm/page.h> /* pgprot_t */
+struct vm_area_struct;
+
/* bits in vm_struct->flags */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
+#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -32,9 +35,11 @@ struct vm_struct {
* Highlevel APIs for driver use
*/
extern void *vmalloc(unsigned long size);
+extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
+extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot);
@@ -45,6 +50,9 @@ extern void vfree(void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
extern void vunmap(void *addr);
+
+extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff);
/*
* Lowlevel-APIs (not for driver use!)
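
A hedged sketch of the intended pairing of vmalloc_user() with remap_vmalloc_range() in a character device's mmap handler; the names and buffer size are invented:

#define EXAMPLE_BUF_SIZE	(64 * 1024)

static void *example_buf;	/* set up once at init time */

static int example_init(void)
{
	/* Zeroed and flagged VM_USERMAP, so it may be mapped to userspace. */
	example_buf = vmalloc_user(EXAMPLE_BUF_SIZE);
	return example_buf ? 0 : -ENOMEM;
}

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
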
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 56f92fcbe94a..9e38b566d0e7 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -50,14 +50,15 @@ struct writeback_control {
* a hint that the filesystem need only write out the pages inside that
* byterange. The byte at `end' is included in the writeout request.
*/
- loff_t start;
- loff_t end;
+ loff_t range_start;
+ loff_t range_end;
unsigned nonblocking:1; /* Don't get stuck on request queues */
unsigned encountered_congestion:1; /* An output: a queue is full */
unsigned for_kupdate:1; /* A kupdate writeback */
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned for_writepages:1; /* This is a writepages() call */
+ unsigned range_cyclic:1; /* range_start is cyclic */
};
/*
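
A hedged sketch of how a caller fills the renamed range fields when writing back a single file region; the surrounding sync logic is omitted and the helper name is invented:

static int write_one_range(struct address_space *mapping,
			   loff_t start, loff_t end)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= start,
		.range_end	= end,	/* inclusive, as noted above */
		.range_cyclic	= 0,
	};

	return do_writepages(mapping, &wbc);
}
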
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index 2f135cf6eef1..913bfc226dda 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -11,8 +11,6 @@
#ifndef _LINUX_ZORRO_H
#define _LINUX_ZORRO_H
-#ifndef __ASSEMBLY__
-
#include <linux/device.h>
@@ -112,45 +110,6 @@ struct ConfigDev {
__u32 cd_Unused[4]; /* for whatever the driver wants */
} __attribute__ ((packed));
-#else /* __ASSEMBLY__ */
-
-LN_Succ = 0
-LN_Pred = LN_Succ+4
-LN_Type = LN_Pred+4
-LN_Pri = LN_Type+1
-LN_Name = LN_Pri+1
-LN_sizeof = LN_Name+4
-
-ER_Type = 0
-ER_Product = ER_Type+1
-ER_Flags = ER_Product+1
-ER_Reserved03 = ER_Flags+1
-ER_Manufacturer = ER_Reserved03+1
-ER_SerialNumber = ER_Manufacturer+2
-ER_InitDiagVec = ER_SerialNumber+4
-ER_Reserved0c = ER_InitDiagVec+2
-ER_Reserved0d = ER_Reserved0c+1
-ER_Reserved0e = ER_Reserved0d+1
-ER_Reserved0f = ER_Reserved0e+1
-ER_sizeof = ER_Reserved0f+1
-
-CD_Node = 0
-CD_Flags = CD_Node+LN_sizeof
-CD_Pad = CD_Flags+1
-CD_Rom = CD_Pad+1
-CD_BoardAddr = CD_Rom+ER_sizeof
-CD_BoardSize = CD_BoardAddr+4
-CD_SlotAddr = CD_BoardSize+4
-CD_SlotSize = CD_SlotAddr+2
-CD_Driver = CD_SlotSize+2
-CD_NextCD = CD_Driver+4
-CD_Unused = CD_NextCD+4
-CD_sizeof = CD_Unused+(4*4)
-
-#endif /* __ASSEMBLY__ */
-
-#ifndef __ASSEMBLY__
-
#define ZORRO_NUM_AUTO 16
#ifdef __KERNEL__
@@ -290,7 +249,6 @@ extern DECLARE_BITMAP(zorro_unused_z2ram, 128);
#define Z2RAM_CHUNKSHIFT (16)
-#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_ZORRO_H */
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index d5147770ad47..ecc42864b001 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -968,6 +968,7 @@ enum ieee80211_state {
enum {
IEEE80211_CH_PASSIVE_ONLY = (1 << 0),
+ IEEE80211_CH_80211H_RULES = (1 << 1),
IEEE80211_CH_B_ONLY = (1 << 2),
IEEE80211_CH_NO_IBSS = (1 << 3),
IEEE80211_CH_UNIFORM_SPREADING = (1 << 4),
@@ -976,10 +977,10 @@ enum {
};
struct ieee80211_channel {
- u32 freq;
+ u32 freq; /* in MHz */
u8 channel;
u8 flags;
- u8 max_power;
+ u8 max_power; /* in dBm */
};
struct ieee80211_geo {
diff --git a/include/net/protocol.h b/include/net/protocol.h
index bcaee39bd2ff..3b6dc15c68a5 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -36,6 +36,7 @@
struct net_protocol {
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
+ struct sk_buff *(*gso_segment)(struct sk_buff *skb, int sg);
int no_policy;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index d10dfecb6cbd..2d8d6adf1616 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1030,9 +1030,13 @@ static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
__sk_dst_set(sk, dst);
sk->sk_route_caps = dst->dev->features;
+ if (sk->sk_route_caps & NETIF_F_GSO)
+ sk->sk_route_caps |= NETIF_F_TSO;
if (sk->sk_route_caps & NETIF_F_TSO) {
if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
sk->sk_route_caps &= ~NETIF_F_TSO;
+ else
+ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
}
}
@@ -1265,6 +1269,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
* @skb: socket buffer to eat
+ * @copied_early: flag indicating whether DMA operations copied this data early
*
* This routine must be called with interrupts disabled or with the socket
* locked so that the sk_buff queue operation is ok.
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5f4eb5c79689..ca3d38dfc00b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -569,13 +569,13 @@ struct tcp_skb_cb {
*/
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->tso_segs;
+ return skb_shinfo(skb)->gso_segs;
}
/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->tso_size;
+ return skb_shinfo(skb)->gso_size;
}
static inline void tcp_dec_pcount_approx(__u32 *count,
@@ -1086,6 +1086,8 @@ extern struct request_sock_ops tcp_request_sock_ops;
extern int tcp_v4_destroy_sock(struct sock *sk);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg);
+
#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 1511714a9585..02e6f6798972 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -205,11 +205,11 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *mqueue_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int mqueue_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, mqueue_fill_super);
+ return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}
static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
@@ -359,7 +359,7 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
return count;
}
-static int mqueue_flush_file(struct file *filp)
+static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
diff --git a/ipc/shm.c b/ipc/shm.c
index 4f133d24030f..fe7ae73b6981 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -698,7 +698,6 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
int err;
unsigned long flags;
unsigned long prot;
- unsigned long o_flags;
int acc_mode;
void *user_addr;
@@ -725,11 +724,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
if (shmflg & SHM_RDONLY) {
prot = PROT_READ;
- o_flags = O_RDONLY;
acc_mode = S_IRUGO;
} else {
prot = PROT_READ | PROT_WRITE;
- o_flags = O_RDWR;
acc_mode = S_IRUGO | S_IWUGO;
}
if (shmflg & SHM_EXEC) {
diff --git a/kernel/acct.c b/kernel/acct.c
index b327f4d20104..6802020e0ceb 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -118,7 +118,7 @@ static int check_free_space(struct file *file)
spin_unlock(&acct_globals.lock);
/* May block */
- if (vfs_statfs(file->f_dentry->d_inode->i_sb, &sbuf))
+ if (vfs_statfs(file->f_dentry, &sbuf))
return res;
suspend = sbuf.f_blocks * SUSPEND;
resume = sbuf.f_blocks * RESUME;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b097ccb4eb7e..9ebd96fda295 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1558,6 +1558,7 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp)
* @uid: msgq user id
* @gid: msgq group id
* @mode: msgq mode (permissions)
+ * @ipcp: in-kernel IPC permissions
*
* Returns 0 for success or NULL context or < 0 on error.
*/
diff --git a/kernel/compat.c b/kernel/compat.c
index c1601a84f8d8..2f672332430f 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -21,6 +21,7 @@
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
+#include <linux/migrate.h>
#include <asm/uaccess.h>
@@ -934,3 +935,25 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
return ret;
}
+
+#ifdef CONFIG_NUMA
+asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
+ compat_uptr_t __user *pages32,
+ const int __user *nodes,
+ int __user *status,
+ int flags)
+{
+ const void __user * __user *pages;
+ int i;
+
+ pages = compat_alloc_user_space(nr_pages * sizeof(void *));
+ for (i = 0; i < nr_pages; i++) {
+ compat_uptr_t p;
+
+ if (get_user(p, pages32 + i) ||
+ put_user(compat_ptr(p), pages + i))
+ return -EFAULT;
+ }
+ return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
+}
+#endif
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ab81fdd4572b..b602f73fb38d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -41,6 +41,7 @@
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
@@ -392,11 +393,11 @@ static int cpuset_fill_super(struct super_block *sb, void *unused_data,
return 0;
}
-static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
- int flags, const char *unused_dev_name,
- void *data)
+static int cpuset_get_sb(struct file_system_type *fs_type,
+ int flags, const char *unused_dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, cpuset_fill_super);
+ return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
}
static struct file_system_type cpuset_fs_type = {
@@ -1177,6 +1178,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
cpumask_t cpus;
nodemask_t from, to;
struct mm_struct *mm;
+ int retval;
if (sscanf(pidbuf, "%d", &pid) != 1)
return -EIO;
@@ -1205,6 +1207,12 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
get_task_struct(tsk);
}
+ retval = security_task_setscheduler(tsk, 0, NULL);
+ if (retval) {
+ put_task_struct(tsk);
+ return retval;
+ }
+
mutex_lock(&callback_mutex);
task_lock(tsk);
diff --git a/kernel/exit.c b/kernel/exit.c
index e06d0c10a24e..a3baf92462bd 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -579,7 +579,7 @@ static void exit_mm(struct task_struct * tsk)
down_read(&mm->mmap_sem);
}
atomic_inc(&mm->mm_count);
- if (mm != tsk->active_mm) BUG();
+ BUG_ON(mm != tsk->active_mm);
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
@@ -1530,8 +1530,7 @@ check_continued:
if (options & __WNOTHREAD)
break;
tsk = next_thread(tsk);
- if (tsk->signal != current->signal)
- BUG();
+ BUG_ON(tsk->signal != current->signal);
} while (tsk != current);
read_unlock(&tasklist_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
index ac8100e3088a..49adc0e8d47c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -368,6 +368,8 @@ void fastcall __mmdrop(struct mm_struct *mm)
*/
void mmput(struct mm_struct *mm)
{
+ might_sleep();
+
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
exit_mmap(mm);
@@ -623,6 +625,7 @@ out:
/*
* Allocate a new files structure and copy contents from the
* passed in files structure.
+ * errorp will be valid only when the returned files_struct is NULL.
*/
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
@@ -631,6 +634,7 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
int open_files, size, i, expand;
struct fdtable *old_fdt, *new_fdt;
+ *errorp = -ENOMEM;
newf = alloc_files();
if (!newf)
goto out;
@@ -744,7 +748,6 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
* break this.
*/
tsk->files = NULL;
- error = -ENOMEM;
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
diff --git a/kernel/futex.c b/kernel/futex.c
index 5699c512057b..e1a380c77a5a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1056,11 +1056,11 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
(unsigned long)uaddr2, val2, val3);
}
-static struct super_block *
-futexfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int futexfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA);
+ return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
}
static struct file_system_type futex_fs_type = {
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 51df337b37db..0f6530117105 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -76,10 +76,11 @@ irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
/*
* Have got an event to handle:
*/
-fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
struct irqaction *action)
{
- int ret, retval = 0, status = 0;
+ irqreturn_t ret, retval = IRQ_NONE;
+ unsigned int status = 0;
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 134f9f2e0e39..a12d00eb5e7c 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -30,7 +30,7 @@ void move_native_irq(int irq)
desc->move_irq = 0;
- if (likely(cpus_empty(pending_irq_cpumask[irq])))
+ if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
return;
if (!desc->handler->set_affinity)
@@ -49,7 +49,7 @@ void move_native_irq(int irq)
* cause some ioapics to mal-function.
* Being paranoid i guess!
*/
- if (unlikely(!cpus_empty(tmp))) {
+ if (likely(!cpus_empty(tmp))) {
if (likely(!(desc->status & IRQ_DISABLED)))
desc->handler->disable(irq);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d03b5eef8ce0..afacd6f585fa 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -24,6 +24,8 @@ static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
#ifdef CONFIG_GENERIC_PENDING_IRQ
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
+ set_balance_irq_affinity(irq, mask_val);
+
/*
* Save these away for later use. Re-progam when the
* interrupt is pending
@@ -33,6 +35,7 @@ void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
#else
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
+ set_balance_irq_affinity(irq, mask_val);
irq_affinity[irq] = mask_val;
irq_desc[irq].handler->set_affinity(irq, mask_val);
}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 7df9abd5ec86..b2fb3c18d06b 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -11,7 +11,7 @@
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
-static int irqfixup;
+static int irqfixup __read_mostly;
/*
* Recovery handler for misrouted interrupts.
@@ -136,9 +136,9 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio
void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
struct pt_regs *regs)
{
- if (action_ret != IRQ_HANDLED) {
+ if (unlikely(action_ret != IRQ_HANDLED)) {
desc->irqs_unhandled++;
- if (action_ret != IRQ_NONE)
+ if (unlikely(action_ret != IRQ_NONE))
report_bad_irq(irq, desc, action_ret);
}
@@ -152,11 +152,11 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
}
desc->irq_count++;
- if (desc->irq_count < 100000)
+ if (likely(desc->irq_count < 100000))
return;
desc->irq_count = 0;
- if (desc->irqs_unhandled > 99900) {
+ if (unlikely(desc->irqs_unhandled > 99900)) {
/*
* The interrupt is stuck
*/
@@ -171,7 +171,7 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
desc->irqs_unhandled = 0;
}
-int noirqdebug;
+int noirqdebug __read_mostly;
int __init noirqdebug_setup(char *str)
{
diff --git a/kernel/kexec.c b/kernel/kexec.c
index bf39d28e4c0e..58f0f382597c 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -902,14 +902,14 @@ static int kimage_load_segment(struct kimage *image,
* kexec does not sync, or unmount filesystems so if you need
* that to happen you need to do that yourself.
*/
-struct kimage *kexec_image = NULL;
-static struct kimage *kexec_crash_image = NULL;
+struct kimage *kexec_image;
+struct kimage *kexec_crash_image;
/*
* A home grown binary mutex.
* Nothing can wait so this mutex is safe to use
* in interrupt context :)
*/
-static int kexec_lock = 0;
+static int kexec_lock;
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
struct kexec_segment __user *segments,
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index f119e098e67b..9e28478a17a5 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -14,6 +14,7 @@
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/kexec.h>
#define KERNEL_ATTR_RO(_name) \
static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
@@ -48,6 +49,20 @@ static ssize_t uevent_helper_store(struct subsystem *subsys, const char *page, s
KERNEL_ATTR_RW(uevent_helper);
#endif
+#ifdef CONFIG_KEXEC
+static ssize_t kexec_loaded_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%d\n", !!kexec_image);
+}
+KERNEL_ATTR_RO(kexec_loaded);
+
+static ssize_t kexec_crash_loaded_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%d\n", !!kexec_crash_image);
+}
+KERNEL_ATTR_RO(kexec_crash_loaded);
+#endif /* CONFIG_KEXEC */
+
decl_subsys(kernel, NULL, NULL);
EXPORT_SYMBOL_GPL(kernel_subsys);
@@ -56,6 +71,10 @@ static struct attribute * kernel_attrs[] = {
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
#endif
+#ifdef CONFIG_KEXEC
+ &kexec_loaded_attr.attr,
+ &kexec_crash_loaded_attr.attr,
+#endif
NULL
};
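
The two attributes registered above appear as /sys/kernel/kexec_loaded and /sys/kernel/kexec_crash_loaded; a small userspace sketch of reading them:

#include <stdio.h>

static int read_flag(const char *path)
{
	int val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("kexec image loaded: %d\n",
	       read_flag("/sys/kernel/kexec_loaded"));
	printf("crash kernel loaded: %d\n",
	       read_flag("/sys/kernel/kexec_crash_loaded"));
	return 0;
}
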
diff --git a/kernel/power/power.h b/kernel/power/power.h
index f06f12f21767..98c41423f3b1 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -55,7 +55,7 @@ struct snapshot_handle {
unsigned int page;
unsigned int page_offset;
unsigned int prev;
- struct pbe *pbe;
+ struct pbe *pbe, *last_pbe;
void *buffer;
unsigned int buf_offset;
};
@@ -105,6 +105,10 @@ extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
+extern unsigned int count_special_pages(void);
+extern int save_special_mem(void);
+extern int restore_special_mem(void);
+
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3eeedbb13b78..3d9284100b22 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -39,8 +39,90 @@ static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
+struct arch_saveable_page {
+ unsigned long start;
+ unsigned long end;
+ char *data;
+ struct arch_saveable_page *next;
+};
+static struct arch_saveable_page *arch_pages;
+
+int swsusp_add_arch_pages(unsigned long start, unsigned long end)
+{
+ struct arch_saveable_page *tmp;
+
+ while (start < end) {
+ tmp = kzalloc(sizeof(struct arch_saveable_page), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ tmp->start = start;
+ tmp->end = ((start >> PAGE_SHIFT) + 1) << PAGE_SHIFT;
+ if (tmp->end > end)
+ tmp->end = end;
+ tmp->next = arch_pages;
+ start = tmp->end;
+ arch_pages = tmp;
+ }
+ return 0;
+}
+
+static unsigned int count_arch_pages(void)
+{
+ unsigned int count = 0;
+ struct arch_saveable_page *tmp = arch_pages;
+ while (tmp) {
+ count++;
+ tmp = tmp->next;
+ }
+ return count;
+}
+
+static int save_arch_mem(void)
+{
+ char *kaddr;
+ struct arch_saveable_page *tmp = arch_pages;
+ int offset;
+
+ pr_debug("swsusp: Saving arch specific memory");
+ while (tmp) {
+ tmp->data = (char *)__get_free_page(GFP_ATOMIC);
+ if (!tmp->data)
+ return -ENOMEM;
+ offset = tmp->start - (tmp->start & PAGE_MASK);
+ /* arch pages might not have a 'struct page' */
+ kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
+ memcpy(tmp->data + offset, kaddr + offset,
+ tmp->end - tmp->start);
+ kunmap_atomic(kaddr, KM_USER0);
+
+ tmp = tmp->next;
+ }
+ return 0;
+}
+
+static int restore_arch_mem(void)
+{
+ char *kaddr;
+ struct arch_saveable_page *tmp = arch_pages;
+ int offset;
+
+ while (tmp) {
+ if (!tmp->data)
+ continue;
+ offset = tmp->start - (tmp->start & PAGE_MASK);
+ kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
+ memcpy(kaddr + offset, tmp->data + offset,
+ tmp->end - tmp->start);
+ kunmap_atomic(kaddr, KM_USER0);
+ free_page((long)tmp->data);
+ tmp->data = NULL;
+ tmp = tmp->next;
+ }
+ return 0;
+}
+
#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void)
+static unsigned int count_highmem_pages(void)
{
struct zone *zone;
unsigned long zone_pfn;
@@ -117,7 +199,7 @@ static int save_highmem_zone(struct zone *zone)
return 0;
}
-int save_highmem(void)
+static int save_highmem(void)
{
struct zone *zone;
int res = 0;
@@ -134,7 +216,7 @@ int save_highmem(void)
return 0;
}
-int restore_highmem(void)
+static int restore_highmem(void)
{
printk("swsusp: Restoring Highmem\n");
while (highmem_copy) {
@@ -150,8 +232,35 @@ int restore_highmem(void)
}
return 0;
}
+#else
+static inline unsigned int count_highmem_pages(void) {return 0;}
+static inline int save_highmem(void) {return 0;}
+static inline int restore_highmem(void) {return 0;}
#endif
+unsigned int count_special_pages(void)
+{
+ return count_arch_pages() + count_highmem_pages();
+}
+
+int save_special_mem(void)
+{
+ int ret;
+ ret = save_arch_mem();
+ if (!ret)
+ ret = save_highmem();
+ return ret;
+}
+
+int restore_special_mem(void)
+{
+ int ret;
+ ret = restore_arch_mem();
+ if (!ret)
+ ret = restore_highmem();
+ return ret;
+}
+
static int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
@@ -177,7 +286,6 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn)
return 0;
page = pfn_to_page(pfn);
- BUG_ON(PageReserved(page) && PageNosave(page));
if (PageNosave(page))
return 0;
if (PageReserved(page) && pfn_is_nosave(pfn))
@@ -293,62 +401,29 @@ static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
}
}
-/**
- * On resume it is necessary to trace and eventually free the unsafe
- * pages that have been allocated, because they are needed for I/O
- * (on x86-64 we likely will "eat" these pages once again while
- * creating the temporary page translation tables)
- */
-
-struct eaten_page {
- struct eaten_page *next;
- char padding[PAGE_SIZE - sizeof(void *)];
-};
-
-static struct eaten_page *eaten_pages = NULL;
-
-static void release_eaten_pages(void)
-{
- struct eaten_page *p, *q;
-
- p = eaten_pages;
- while (p) {
- q = p->next;
- /* We don't want swsusp_free() to free this page again */
- ClearPageNosave(virt_to_page(p));
- free_page((unsigned long)p);
- p = q;
- }
- eaten_pages = NULL;
-}
+static unsigned int unsafe_pages;
/**
* @safe_needed - on resume, for storing the PBE list and the image,
* we can only use memory pages that do not conflict with the pages
- * which had been used before suspend.
+ * used before suspend.
*
* The unsafe pages are marked with the PG_nosave_free flag
- *
- * Allocated but unusable (ie eaten) memory pages should be marked
- * so that swsusp_free() can release them
+ * and we count them using unsafe_pages
*/
static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
void *res;
+ res = (void *)get_zeroed_page(gfp_mask);
if (safe_needed)
- do {
+ while (res && PageNosaveFree(virt_to_page(res))) {
+ /* The page is unsafe, mark it for swsusp_free() */
+ SetPageNosave(virt_to_page(res));
+ unsafe_pages++;
res = (void *)get_zeroed_page(gfp_mask);
- if (res && PageNosaveFree(virt_to_page(res))) {
- /* This is for swsusp_free() */
- SetPageNosave(virt_to_page(res));
- ((struct eaten_page *)res)->next = eaten_pages;
- eaten_pages = res;
- }
- } while (res && PageNosaveFree(virt_to_page(res)));
- else
- res = (void *)get_zeroed_page(gfp_mask);
+ }
if (res) {
SetPageNosave(virt_to_page(res));
SetPageNosaveFree(virt_to_page(res));
@@ -374,7 +449,8 @@ unsigned long get_safe_page(gfp_t gfp_mask)
* On each page we set up a list of struct_pbe elements.
*/
-struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
+static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
+ int safe_needed)
{
unsigned int num;
struct pbe *pblist, *pbe;
@@ -642,6 +718,8 @@ static int mark_unsafe_pages(struct pbe *pblist)
return -EFAULT;
}
+ unsafe_pages = 0;
+
return 0;
}
@@ -719,42 +797,99 @@ static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
}
/**
- * create_image - use metadata contained in the PBE list
+ * prepare_image - use metadata contained in the PBE list
* pointed to by pagedir_nosave to mark the pages that will
* be overwritten in the process of restoring the system
- * memory state from the image and allocate memory for
- * the image avoiding these pages
+ * memory state from the image ("unsafe" pages) and allocate
+ * memory for the image
+ *
+ * The idea is to allocate the PBE list first and then
+ * allocate as many pages as are needed for the image data,
+ * but not to assign these pages to the PBEs initially.
+ * Instead, we just mark them as allocated and create a list
+ * of "safe" pages which will be used later.
*/
-static int create_image(struct snapshot_handle *handle)
+struct safe_page {
+ struct safe_page *next;
+ char padding[PAGE_SIZE - sizeof(void *)];
+};
+
+static struct safe_page *safe_pages;
+
+static int prepare_image(struct snapshot_handle *handle)
{
int error = 0;
- struct pbe *p, *pblist;
+ unsigned int nr_pages = nr_copy_pages;
+ struct pbe *p, *pblist = NULL;
p = pagedir_nosave;
error = mark_unsafe_pages(p);
if (!error) {
- pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
+ pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
if (pblist)
copy_page_backup_list(pblist, p);
free_pagedir(p, 0);
if (!pblist)
error = -ENOMEM;
}
- if (!error)
- error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
+ safe_pages = NULL;
+ if (!error && nr_pages > unsafe_pages) {
+ nr_pages -= unsafe_pages;
+ while (nr_pages--) {
+ struct safe_page *ptr;
+
+ ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
+ if (!ptr) {
+ error = -ENOMEM;
+ break;
+ }
+ if (!PageNosaveFree(virt_to_page(ptr))) {
+ /* The page is "safe", add it to the list */
+ ptr->next = safe_pages;
+ safe_pages = ptr;
+ }
+ /* Mark the page as allocated */
+ SetPageNosave(virt_to_page(ptr));
+ SetPageNosaveFree(virt_to_page(ptr));
+ }
+ }
if (!error) {
- release_eaten_pages();
pagedir_nosave = pblist;
} else {
- pagedir_nosave = NULL;
handle->pbe = NULL;
- nr_copy_pages = 0;
- nr_meta_pages = 0;
+ swsusp_free();
}
return error;
}
+static void *get_buffer(struct snapshot_handle *handle)
+{
+ struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
+ struct page *page = virt_to_page(pbe->orig_address);
+
+ if (PageNosave(page) && PageNosaveFree(page)) {
+ /*
+ * We have allocated the "original" page frame and we can
+ * use it directly to store the read page
+ */
+ pbe->address = 0;
+ if (last && last->next)
+ last->next = NULL;
+ return (void *)pbe->orig_address;
+ }
+ /*
+ * The "original" page frame has not been allocated and we have to
+ * use a "safe" page frame to store the read page
+ */
+ pbe->address = (unsigned long)safe_pages;
+ safe_pages = safe_pages->next;
+ if (last)
+ last->next = pbe;
+ handle->last_pbe = pbe;
+ return (void *)pbe->address;
+}
+
/**
* snapshot_write_next - used for writing the system memory snapshot.
*
@@ -799,15 +934,16 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
} else if (handle->prev <= nr_meta_pages) {
handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
if (!handle->pbe) {
- error = create_image(handle);
+ error = prepare_image(handle);
if (error)
return error;
handle->pbe = pagedir_nosave;
- handle->buffer = (void *)handle->pbe->address;
+ handle->last_pbe = NULL;
+ handle->buffer = get_buffer(handle);
}
} else {
handle->pbe = handle->pbe->next;
- handle->buffer = (void *)handle->pbe->address;
+ handle->buffer = get_buffer(handle);
}
handle->prev = handle->page;
}
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index c4016cbbd3e0..f0ee4e7780d6 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -62,16 +62,6 @@ unsigned long image_size = 500 * 1024 * 1024;
int in_suspend __nosavedata = 0;
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void);
-int save_highmem(void);
-int restore_highmem(void);
-#else
-static int save_highmem(void) { return 0; }
-static int restore_highmem(void) { return 0; }
-static unsigned int count_highmem_pages(void) { return 0; }
-#endif
-
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
@@ -175,6 +165,12 @@ void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
*/
#define SHRINK_BITE 10000
+static inline unsigned long __shrink_memory(long tmp)
+{
+ if (tmp > SHRINK_BITE)
+ tmp = SHRINK_BITE;
+ return shrink_all_memory(tmp);
+}
int swsusp_shrink_memory(void)
{
@@ -186,21 +182,23 @@ int swsusp_shrink_memory(void)
printk("Shrinking memory... ");
do {
- size = 2 * count_highmem_pages();
+ size = 2 * count_special_pages();
size += size / 50 + count_data_pages();
size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
PAGES_FOR_IO;
tmp = size;
for_each_zone (zone)
- if (!is_highmem(zone))
+ if (!is_highmem(zone) && populated_zone(zone)) {
tmp -= zone->free_pages;
+ tmp += zone->lowmem_reserve[ZONE_NORMAL];
+ }
if (tmp > 0) {
- tmp = shrink_all_memory(SHRINK_BITE);
+ tmp = __shrink_memory(tmp);
if (!tmp)
return -ENOMEM;
pages += tmp;
} else if (size > image_size / PAGE_SIZE) {
- tmp = shrink_all_memory(SHRINK_BITE);
+ tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
pages += tmp;
}
printk("\b%c", p[i++%4]);
@@ -228,7 +226,7 @@ int swsusp_suspend(void)
goto Enable_irqs;
}
- if ((error = save_highmem())) {
+ if ((error = save_special_mem())) {
printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
goto Restore_highmem;
}
@@ -239,7 +237,7 @@ int swsusp_suspend(void)
/* Restore control flow magically appears here */
restore_processor_state();
Restore_highmem:
- restore_highmem();
+ restore_special_mem();
device_power_up();
Enable_irqs:
local_irq_enable();
@@ -265,7 +263,7 @@ int swsusp_resume(void)
*/
swsusp_free();
restore_processor_state();
- restore_highmem();
+ restore_special_mem();
touch_softlockup_watchdog();
device_power_up();
local_irq_enable();
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2058f88c7bbb..20e9710fc21c 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -612,14 +612,6 @@ void synchronize_rcu(void)
wait_for_completion(&rcu.completion);
}
-/*
- * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
- */
-void synchronize_kernel(void)
-{
- synchronize_rcu();
-}
-
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
@@ -627,7 +619,6 @@ module_param(qlowmark, int, 0);
module_param(rsinterval, int, 0);
#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
-EXPORT_SYMBOL_GPL_FUTURE(call_rcu); /* WARNING: GPL-only in April 2006. */
-EXPORT_SYMBOL_GPL_FUTURE(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);
-EXPORT_SYMBOL_GPL_FUTURE(synchronize_kernel); /* WARNING: GPL-only in April 2006. */
diff --git a/kernel/sched.c b/kernel/sched.c
index c13f1bd2df7d..5dbc42694477 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3886,6 +3886,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
!capable(CAP_SYS_NICE))
goto out_unlock;
+ retval = security_task_setscheduler(p, 0, NULL);
+ if (retval)
+ goto out_unlock;
+
cpus_allowed = cpuset_cpus_allowed(p);
cpus_and(new_mask, new_mask, cpus_allowed);
retval = set_cpus_allowed(p, new_mask);
@@ -3954,7 +3958,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
if (!p)
goto out_unlock;
- retval = 0;
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
cpus_and(*mask, p->cpus_allowed, cpu_online_map);
out_unlock:
@@ -4046,6 +4053,9 @@ asmlinkage long sys_sched_yield(void)
static inline void __cond_resched(void)
{
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ __might_sleep(__FILE__, __LINE__);
+#endif
/*
* The BKS might be reacquired before we have dropped
* PREEMPT_ACTIVE, which could trigger a second
diff --git a/kernel/sys.c b/kernel/sys.c
index fc9ebbbaba0c..90930b28d2ca 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -13,7 +13,6 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
-#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -57,6 +56,12 @@
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b) (-EINVAL)
#endif
+#ifndef GET_ENDIAN
+# define GET_ENDIAN(a,b) (-EINVAL)
+#endif
+#ifndef SET_ENDIAN
+# define SET_ENDIAN(a,b) (-EINVAL)
+#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
@@ -2045,6 +2050,13 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
return -EFAULT;
return 0;
}
+ case PR_GET_ENDIAN:
+ error = GET_ENDIAN(current, arg2);
+ break;
+ case PR_SET_ENDIAN:
+ error = SET_ENDIAN(current, arg2);
+ break;
+
default:
error = -EINVAL;
break;
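
The PR_GET_ENDIAN/PR_SET_ENDIAN cases added to sys_prctl() above route through per-architecture GET_ENDIAN/SET_ENDIAN helpers, with the new stubs returning -EINVAL where an architecture provides none. A minimal user-space sketch of querying the mode — assuming the PR_GET_ENDIAN constant is visible via <linux/prctl.h> and that the running architecture implements the hook — could look like:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		unsigned int mode;

		/* Fails with EINVAL on architectures without a GET_ENDIAN hook. */
		if (prctl(PR_GET_ENDIAN, (unsigned long)&mode, 0, 0, 0) == -1) {
			perror("PR_GET_ENDIAN");
			return 1;
		}
		printf("endian mode: %u\n", mode);
		return 0;
	}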
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 5433195040f1..6991bece67e8 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -87,6 +87,7 @@ cond_syscall(sys_inotify_init);
cond_syscall(sys_inotify_add_watch);
cond_syscall(sys_inotify_rm_watch);
cond_syscall(sys_migrate_pages);
+cond_syscall(sys_move_pages);
cond_syscall(sys_chown16);
cond_syscall(sys_fchown16);
cond_syscall(sys_getegid16);
@@ -132,3 +133,4 @@ cond_syscall(sys_mincore);
cond_syscall(sys_madvise);
cond_syscall(sys_mremap);
cond_syscall(sys_remap_file_pages);
+cond_syscall(compat_sys_move_pages);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0d656e61621d..eb8bd214e7d7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -59,6 +59,7 @@ extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
extern int C_A_D;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
+extern int sysctl_panic_on_oom;
extern int max_threads;
extern int sysrq_enabled;
extern int core_uses_pid;
@@ -398,7 +399,7 @@ static ctl_table kern_table[] = {
.strategy = &sysctl_string,
},
#endif
-#ifdef CONFIG_HOTPLUG
+#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
{
.ctl_name = KERN_HOTPLUG,
.procname = "hotplug",
@@ -702,6 +703,14 @@ static ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = VM_PANIC_ON_OOM,
+ .procname = "panic_on_oom",
+ .data = &sysctl_panic_on_oom,
+ .maxlen = sizeof(sysctl_panic_on_oom),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = VM_OVERCOMMIT_RATIO,
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
diff --git a/kernel/timer.c b/kernel/timer.c
index 9e49deed468c..f35b3939e937 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -383,23 +383,19 @@ EXPORT_SYMBOL(del_timer_sync);
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
/* cascade all the timers from tv up one level */
- struct list_head *head, *curr;
+ struct timer_list *timer, *tmp;
+ struct list_head tv_list;
+
+ list_replace_init(tv->vec + index, &tv_list);
- head = tv->vec + index;
- curr = head->next;
/*
- * We are removing _all_ timers from the list, so we don't have to
- * detach them individually, just clear the list afterwards.
+ * We are removing _all_ timers from the list, so we
+ * don't have to detach them individually.
*/
- while (curr != head) {
- struct timer_list *tmp;
-
- tmp = list_entry(curr, struct timer_list, entry);
- BUG_ON(tmp->base != base);
- curr = curr->next;
- internal_add_timer(base, tmp);
+ list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
+ BUG_ON(timer->base != base);
+ internal_add_timer(base, timer);
}
- INIT_LIST_HEAD(head);
return index;
}
@@ -419,10 +415,10 @@ static inline void __run_timers(tvec_base_t *base)
spin_lock_irq(&base->lock);
while (time_after_eq(jiffies, base->timer_jiffies)) {
- struct list_head work_list = LIST_HEAD_INIT(work_list);
+ struct list_head work_list;
struct list_head *head = &work_list;
int index = base->timer_jiffies & TVR_MASK;
-
+
/*
* Cascade timers:
*/
@@ -431,8 +427,8 @@ static inline void __run_timers(tvec_base_t *base)
(!cascade(base, &base->tv3, INDEX(1))) &&
!cascade(base, &base->tv4, INDEX(2)))
cascade(base, &base->tv5, INDEX(3));
- ++base->timer_jiffies;
- list_splice_init(base->tv1.vec + index, &work_list);
+ ++base->timer_jiffies;
+ list_replace_init(base->tv1.vec + index, &work_list);
while (!list_empty(head)) {
void (*fn)(unsigned long);
unsigned long data;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 880fb415a8f6..740c5abceb07 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -531,11 +531,11 @@ int current_is_keventd(void)
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
- LIST_HEAD(list);
+ struct list_head list;
struct work_struct *work;
spin_lock_irq(&cwq->lock);
- list_splice_init(&cwq->worklist, &list);
+ list_replace_init(&cwq->worklist, &list);
while (!list_empty(&list)) {
printk("Taking work for %s\n", wq->name);
diff --git a/lib/Makefile b/lib/Makefile
index b830c9a15541..79358ad1f113 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9ce0a6a3b85a..71338b48e889 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -4,10 +4,6 @@
* Uses for this includes on-device special memory, uncached memory
* etc.
*
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
- * and adapted for general purpose use.
- *
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
* This source code is licensed under the GNU General Public License,
@@ -15,172 +11,155 @@
*/
#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
#include <linux/genalloc.h>
-#include <asm/page.h>
-
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
- unsigned long (*fp)(struct gen_pool *),
- unsigned long data)
+/*
+ * Create a new special memory pool.
+ *
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
- struct gen_pool *poolp;
- unsigned long tmp;
- int i;
-
- /*
- * This is really an arbitrary limit, +10 is enough for
- * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
- * this can be increased without problems.
- */
- if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
- ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
- return NULL;
-
- if (!max_chunk_shift)
- max_chunk_shift = PAGE_SHIFT;
-
- poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
- if (!poolp)
- return NULL;
- memset(poolp, 0, sizeof(struct gen_pool));
- poolp->h = kmalloc(sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
- GFP_KERNEL);
- if (!poolp->h) {
- printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
- kfree(poolp);
- return NULL;
- }
- memset(poolp->h, 0, sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
-
- spin_lock_init(&poolp->lock);
- poolp->get_new_chunk = fp;
- poolp->max_chunk_shift = max_chunk_shift;
- poolp->private = data;
-
- for (i = 0; i < nr_chunks; i++) {
- tmp = poolp->get_new_chunk(poolp);
- printk(KERN_INFO "allocated %lx\n", tmp);
- if (!tmp)
- break;
- gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
- }
+ struct gen_pool *pool;
- return poolp;
+ pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+ if (pool != NULL) {
+ rwlock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->chunks);
+ pool->min_alloc_order = min_alloc_order;
+ }
+ return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/*
- * Simple power of two buddy-like generic allocator.
- * Provides naturally aligned memory chunks.
+ * Add a new chunk of memory to the specified pool.
+ *
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ * allocated on, or -1
*/
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
+ int nid)
{
- int j, i, s, max_chunk_size;
- unsigned long a, flags;
- struct gen_pool_link *h = poolp->h;
+ struct gen_pool_chunk *chunk;
+ int nbits = size >> pool->min_alloc_order;
+ int nbytes = sizeof(struct gen_pool_chunk) +
+ (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+ if (unlikely(chunk == NULL))
+ return -1;
- if (size > max_chunk_size)
- return 0;
+ memset(chunk, 0, nbytes);
+ spin_lock_init(&chunk->lock);
+ chunk->start_addr = addr;
+ chunk->end_addr = addr + size;
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- j = i -= ALLOC_MIN_SHIFT;
-
- spin_lock_irqsave(&poolp->lock, flags);
- while (!h[j].next) {
- if (s == max_chunk_size) {
- struct gen_pool_link *ptr;
- spin_unlock_irqrestore(&poolp->lock, flags);
- ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
- spin_lock_irqsave(&poolp->lock, flags);
- h[j].next = ptr;
- if (h[j].next)
- h[j].next->next = NULL;
- break;
- }
- j++;
- s <<= 1;
- }
- a = (unsigned long) h[j].next;
- if (a) {
- h[j].next = h[j].next->next;
- /*
- * This should be split into a seperate function doing
- * the chunk split in order to support custom
- * handling memory not physically accessible by host
- */
- while (j > i) {
- j -= 1;
- s >>= 1;
- h[j].next = (struct gen_pool_link *) (a + s);
- h[j].next->next = NULL;
- }
- }
- spin_unlock_irqrestore(&poolp->lock, flags);
- return a;
+ write_lock(&pool->lock);
+ list_add(&chunk->next_chunk, &pool->chunks);
+ write_unlock(&pool->lock);
+
+ return 0;
}
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_add);
/*
- * Counter-part of the generic allocator.
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
+ *
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
*/
-void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
- struct gen_pool_link *q;
- struct gen_pool_link *h = poolp->h;
- unsigned long a, b, flags;
- int i, s, max_chunk_size;
-
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long addr, flags;
+ int order = pool->min_alloc_order;
+ int nbits, bit, start_bit, end_bit;
- if (size > max_chunk_size)
- return;
-
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- i -= ALLOC_MIN_SHIFT;
-
- a = ptr;
+ if (size == 0)
+ return 0;
- spin_lock_irqsave(&poolp->lock, flags);
- while (1) {
- if (s == max_chunk_size) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
- break;
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit -= nbits + 1;
+
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = -1;
+ while (bit + 1 < end_bit) {
+ bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
+ if (bit >= end_bit)
+ break;
+
+ start_bit = bit;
+ if (nbits > 1) {
+ bit = find_next_bit(chunk->bits, bit + nbits,
+ bit + 1);
+ if (bit - start_bit < nbits)
+ continue;
+ }
+
+ addr = chunk->start_addr +
+ ((unsigned long)start_bit << order);
+ while (nbits--)
+ __set_bit(start_bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
- b = a ^ s;
- q = &h[i];
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ }
+ read_unlock(&pool->lock);
+ return 0;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
- while (q->next && q->next != (struct gen_pool_link *)b)
- q = q->next;
- if (!q->next) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
+/*
+ * Free the specified memory back to the specified pool.
+ *
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ */
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+{
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long flags;
+ int order = pool->min_alloc_order;
+ int bit, nbits;
+
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ BUG_ON(addr + size > chunk->end_addr);
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = (addr - chunk->start_addr) >> order;
+ while (nbits--)
+ __clear_bit(bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
break;
}
- q->next = q->next->next;
- a = a & b;
- s <<= 1;
- i++;
}
- spin_unlock_irqrestore(&poolp->lock, flags);
+ BUG_ON(nbits > 0);
+ read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
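
The rewritten genalloc above replaces the buddy-style pool with a list of chunks, each carrying a bitmap whose granularity is 1 << min_alloc_order bytes, and allocates first-fit. A minimal in-kernel usage sketch — the SRAM base address, size and the 256-byte allocation are placeholders, not values from this patch — might be:

	#include <linux/genalloc.h>

	static struct gen_pool *sram_pool;

	static int sram_pool_setup(unsigned long sram_base, size_t sram_size)
	{
		/* One bitmap bit per 1 << 5 = 32 bytes. */
		sram_pool = gen_pool_create(5, -1);
		if (!sram_pool)
			return -ENOMEM;

		/* Hand the whole window to the pool; -1 means any NUMA node. */
		if (gen_pool_add(sram_pool, sram_base, sram_size, -1))
			return -ENOMEM;
		return 0;
	}

	static unsigned long sram_alloc_buf(void)
	{
		/* First-fit; returns 0 when no chunk has 256 contiguous bytes. */
		return gen_pool_alloc(sram_pool, 256);
	}

	static void sram_free_buf(unsigned long addr)
	{
		gen_pool_free(sram_pool, addr, 256);
	}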
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
new file mode 100644
index 000000000000..850449080e1c
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
+{
+ long count;
+ s32 *pcount;
+ int cpu = get_cpu();
+
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ count = *pcount + amount;
+ if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+ spin_lock(&fbc->lock);
+ fbc->count += count;
+ *pcount = 0;
+ spin_unlock(&fbc->lock);
+ } else {
+ *pcount = count;
+ }
+ put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ s64 ret;
+ int cpu;
+
+ spin_lock(&fbc->lock);
+ ret = fbc->count;
+ for_each_possible_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+ spin_unlock(&fbc->lock);
+ return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
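
The new lib/percpu_counter.c batches updates per CPU and only folds them into the shared count, under fbc->lock, once a CPU's delta crosses FBC_BATCH. A hedged sketch of a caller follows; percpu_counter_init() and percpu_counter_read_positive() are assumed to come from <linux/percpu_counter.h> with the signatures shown, which this diff does not itself confirm:

	#include <linux/percpu_counter.h>

	static struct percpu_counter nr_widgets;

	static void widgets_init(void)
	{
		percpu_counter_init(&nr_widgets);	/* assumed: takes only the counter */
	}

	static void widget_created(void)
	{
		percpu_counter_mod(&nr_widgets, 1);	/* cheap, usually no lock taken */
	}

	static void widget_destroyed(void)
	{
		percpu_counter_mod(&nr_widgets, -1);
	}

	static long widgets_estimate(void)
	{
		/* Fast read; may lag by up to FBC_BATCH per CPU. */
		return percpu_counter_read_positive(&nr_widgets);
	}

	static s64 widgets_exact(void)
	{
		/* Accurate but slow: sums every CPU's pending delta. */
		return percpu_counter_sum(&nr_widgets);
	}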
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7097bb239e40..b32efae7688e 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,7 @@
#ifdef __KERNEL__
-#define RADIX_TREE_MAP_SHIFT 6
+#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
#endif
@@ -74,6 +74,11 @@ struct radix_tree_preload {
};
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
+{
+ return root->gfp_mask & __GFP_BITS_MASK;
+}
+
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
@@ -82,9 +87,10 @@ static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
struct radix_tree_node *ret;
+ gfp_t gfp_mask = root_gfp_mask(root);
- ret = kmem_cache_alloc(radix_tree_node_cachep, root->gfp_mask);
- if (ret == NULL && !(root->gfp_mask & __GFP_WAIT)) {
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+ if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
struct radix_tree_preload *rtp;
rtp = &__get_cpu_var(radix_tree_preloads);
@@ -152,6 +158,27 @@ static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
return test_bit(offset, node->tags[tag]);
}
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask |= (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask &= ~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+ return root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
@@ -182,7 +209,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_node *node;
unsigned int height;
- char tags[RADIX_TREE_MAX_TAGS];
int tag;
/* Figure out what the height should be. */
@@ -195,16 +221,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
goto out;
}
- /*
- * Prepare the tag status of the top-level node for propagation
- * into the newly-pushed top-level node(s)
- */
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 0;
- if (any_tag_set(root->rnode, tag))
- tags[tag] = 1;
- }
-
do {
if (!(node = radix_tree_node_alloc(root)))
return -ENOMEM;
@@ -214,7 +230,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
/* Propagate the aggregated tag info into the new root */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
+ if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
@@ -243,8 +259,7 @@ int radix_tree_insert(struct radix_tree_root *root,
int error;
/* Make sure the tree is high enough. */
- if ((!index && !root->rnode) ||
- index > radix_tree_maxindex(root->height)) {
+ if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
if (error)
return error;
@@ -255,7 +270,7 @@ int radix_tree_insert(struct radix_tree_root *root,
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
offset = 0; /* uninitialised var warning */
- do {
+ while (height > 0) {
if (slot == NULL) {
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
@@ -273,16 +288,21 @@ int radix_tree_insert(struct radix_tree_root *root,
slot = node->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
- } while (height > 0);
+ }
if (slot != NULL)
return -EEXIST;
- BUG_ON(!node);
- node->count++;
- node->slots[offset] = item;
- BUG_ON(tag_get(node, 0, offset));
- BUG_ON(tag_get(node, 1, offset));
+ if (node) {
+ node->count++;
+ node->slots[offset] = item;
+ BUG_ON(tag_get(node, 0, offset));
+ BUG_ON(tag_get(node, 1, offset));
+ } else {
+ root->rnode = item;
+ BUG_ON(root_tag_get(root, 0));
+ BUG_ON(root_tag_get(root, 1));
+ }
return 0;
}
@@ -295,9 +315,13 @@ static inline void **__lookup_slot(struct radix_tree_root *root,
struct radix_tree_node **slot;
height = root->height;
+
if (index > radix_tree_maxindex(height))
return NULL;
+ if (height == 0 && root->rnode)
+ return (void **)&root->rnode;
+
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = &root->rnode;
@@ -365,11 +389,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
struct radix_tree_node *slot;
height = root->height;
- if (index > radix_tree_maxindex(height))
- return NULL;
+ BUG_ON(index > radix_tree_maxindex(height));
- shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
+ shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
while (height > 0) {
int offset;
@@ -383,6 +406,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
height--;
}
+ /* set the root's tag bit */
+ if (slot && !root_tag_get(root, tag))
+ root_tag_set(root, tag);
+
return slot;
}
EXPORT_SYMBOL(radix_tree_tag_set);
@@ -405,9 +432,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
height = root->height;
if (index > radix_tree_maxindex(height))
@@ -432,20 +458,24 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
height--;
}
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- do {
+ while (pathp->node) {
if (!tag_get(pathp->node, tag, pathp->offset))
goto out;
tag_clear(pathp->node, tag, pathp->offset);
if (any_tag_set(pathp->node, tag))
goto out;
pathp--;
- } while (pathp->node);
+ }
+
+ /* clear the root's tag bit */
+ if (root_tag_get(root, tag))
+ root_tag_clear(root, tag);
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
@@ -458,9 +488,8 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
*
* Return values:
*
- * 0: tag not present
- * 1: tag present, set
- * -1: tag present, unset
+ * 0: tag not present or not set
+ * 1: tag set
*/
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
@@ -473,6 +502,13 @@ int radix_tree_tag_get(struct radix_tree_root *root,
if (index > radix_tree_maxindex(height))
return 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ if (height == 0)
+ return 1;
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -494,7 +530,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
int ret = tag_get(slot, tag, offset);
BUG_ON(ret && saw_unset_tag);
- return ret ? 1 : -1;
+ return ret;
}
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
@@ -514,8 +550,11 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
unsigned long i;
height = root->height;
- if (height == 0)
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
goto out;
+ }
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -603,10 +642,16 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
unsigned int height = root->height;
struct radix_tree_node *slot;
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
- while (height > 0) {
+ do {
unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
@@ -637,7 +682,7 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
}
shift -= RADIX_TREE_MAP_SHIFT;
slot = slot->slots[i];
- }
+ } while (height > 0);
out:
*next_index = index;
return nr_found;
@@ -665,6 +710,10 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long cur_index = first_index;
unsigned int ret = 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
while (ret < max_items) {
unsigned int nr_found;
unsigned long next_index; /* Index of next search */
@@ -689,7 +738,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
static inline void radix_tree_shrink(struct radix_tree_root *root)
{
/* try to shrink tree height */
- while (root->height > 1 &&
+ while (root->height > 0 &&
root->rnode->count == 1 &&
root->rnode->slots[0]) {
struct radix_tree_node *to_free = root->rnode;
@@ -717,12 +766,8 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_path *orig_pathp;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
- char tags[RADIX_TREE_MAX_TAGS];
- int nr_cleared_tags;
int tag;
int offset;
@@ -730,11 +775,17 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
if (index > radix_tree_maxindex(height))
goto out;
+ slot = root->rnode;
+ if (height == 0 && root->rnode) {
+ root_tag_clear_all(root);
+ root->rnode = NULL;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
- slot = root->rnode;
- for ( ; height > 0; height--) {
+ do {
if (slot == NULL)
goto out;
@@ -744,44 +795,22 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
pathp->node = slot;
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
- }
+ height--;
+ } while (height > 0);
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- orig_pathp = pathp;
-
/*
* Clear all tags associated with the just-deleted item
*/
- nr_cleared_tags = 0;
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 1;
- if (tag_get(pathp->node, tag, pathp->offset)) {
- tag_clear(pathp->node, tag, pathp->offset);
- if (!any_tag_set(pathp->node, tag)) {
- tags[tag] = 0;
- nr_cleared_tags++;
- }
- }
- }
-
- for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
- continue;
-
- tag_clear(pathp->node, tag, pathp->offset);
- if (any_tag_set(pathp->node, tag)) {
- tags[tag] = 1;
- nr_cleared_tags--;
- }
- }
+ if (tag_get(pathp->node, tag, pathp->offset))
+ radix_tree_tag_clear(root, index, tag);
}
/* Now free the nodes we do not need anymore */
- for (pathp = orig_pathp; pathp->node; pathp--) {
+ while (pathp->node) {
pathp->node->slots[pathp->offset] = NULL;
pathp->node->count--;
@@ -793,11 +822,15 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
/* Node with zero slots in use so free it */
radix_tree_node_free(pathp->node);
+
+ pathp--;
}
- root->rnode = NULL;
+ root_tag_clear_all(root);
root->height = 0;
+ root->rnode = NULL;
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_delete);
@@ -808,11 +841,7 @@ EXPORT_SYMBOL(radix_tree_delete);
*/
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
- struct radix_tree_node *rnode;
- rnode = root->rnode;
- if (!rnode)
- return 0;
- return any_tag_set(rnode, tag);
+ return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
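
The radix-tree changes above store per-tree tag bits in the high bits of root->gfp_mask (above __GFP_BITS_SHIFT) and keep a lone item at index 0 directly in root->rnode with no node allocated. The external API is unchanged; a small sketch of what that means for a caller — the RADIX_TREE() initializer is assumed from <linux/radix-tree.h> — is:

	#include <linux/radix-tree.h>

	static RADIX_TREE(tree, GFP_ATOMIC);	/* gfp flags live in the low bits of gfp_mask */
	static int item;

	static void root_tag_demo(void)
	{
		/* A single entry at index 0 sits directly in root->rnode. */
		if (radix_tree_insert(&tree, 0, &item))
			return;

		/* Tag 0 for the whole tree is bit __GFP_BITS_SHIFT + 0 of gfp_mask. */
		radix_tree_tag_set(&tree, 0, 0);

		/* radix_tree_tagged() is now a single bit test on the root. */
		if (radix_tree_tagged(&tree, 0))
			radix_tree_tag_clear(&tree, 0, 0);
	}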
diff --git a/lib/string.c b/lib/string.c
index 064f6315b1c3..63077267367e 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -301,6 +301,36 @@ char *strnchr(const char *s, size_t count, int c)
EXPORT_SYMBOL(strnchr);
#endif
+/**
+ * strstrip - Removes leading and trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns a pointer to the first non-whitespace
+ * character in @s.
+ */
+char *strstrip(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end != s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ while (*s && isspace(*s))
+ s++;
+
+ return s;
+}
+EXPORT_SYMBOL(strstrip);
+
#ifndef __HAVE_ARCH_STRLEN
/**
* strlen - Find the length of a string
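
strstrip() above edits its argument in place, writing a NUL over the first trailing whitespace character and returning a pointer past any leading whitespace, so the buffer must be writable and the return value must be used. A small sketch of a caller, e.g. parsing a line handed to a store/write handler, with pr_debug() assumed available:

	#include <linux/kernel.h>
	#include <linux/string.h>

	static int parse_name(char *buf)	/* writable, NUL-terminated buffer */
	{
		char *name = strstrip(buf);	/* trims both ends in place */

		if (!*name)
			return -EINVAL;		/* the line was all whitespace */

		/* 'name' points into buf; free buf, never 'name', when done. */
		pr_debug("parsed name '%s'\n", name);
		return 0;
	}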
diff --git a/mm/Kconfig b/mm/Kconfig
index 332f5c29b53a..66e65ab39426 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -138,8 +138,8 @@ config SPLIT_PTLOCK_CPUS
#
config MIGRATION
bool "Page migration"
- def_bool y if NUMA
- depends on SWAP && NUMA
+ def_bool y
+ depends on NUMA
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful for
diff --git a/mm/filemap.c b/mm/filemap.c
index fd57442186cb..807a463fd5ed 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
+#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
@@ -38,7 +39,6 @@
*/
#include <linux/buffer_head.h> /* for generic_osync_inode */
-#include <asm/uaccess.h>
#include <asm/mman.h>
static ssize_t
@@ -171,15 +171,17 @@ static int sync_page(void *word)
}
/**
- * filemap_fdatawrite_range - start writeback against all of a mapping's
- * dirty pages that lie within the byte offsets <start, end>
+ * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
* @start: offset in bytes where the range starts
* @end: offset in bytes where the range ends (inclusive)
* @sync_mode: enable synchronous operation
*
+ * Start writeback against all of a mapping's dirty pages that lie
+ * within the byte offsets <start, end> inclusive.
+ *
* If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory * cleansing writeback. The difference between
+ * opposed to a regular memory cleansing writeback. The difference between
* these two operations is that if a dirty page/buffer is encountered, it must
* be waited upon, and not just skipped over.
*/
@@ -190,8 +192,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
struct writeback_control wbc = {
.sync_mode = sync_mode,
.nr_to_write = mapping->nrpages * 2,
- .start = start,
- .end = end,
+ .range_start = start,
+ .range_end = end,
};
if (!mapping_cap_writeback_dirty(mapping))
@@ -204,7 +206,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
static inline int __filemap_fdatawrite(struct address_space *mapping,
int sync_mode)
{
- return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+ return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}
int filemap_fdatawrite(struct address_space *mapping)
@@ -219,7 +221,10 @@ static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
-/*
+/**
+ * filemap_flush - mostly a non-blocking flush
+ * @mapping: target address_space
+ *
* This is a mostly non-blocking flush. Not suitable for data-integrity
* purposes - I/O may not be started against all dirty pages.
*/
@@ -229,7 +234,12 @@ int filemap_flush(struct address_space *mapping)
}
EXPORT_SYMBOL(filemap_flush);
-/*
+/**
+ * wait_on_page_writeback_range - wait for writeback to complete
+ * @mapping: target address_space
+ * @start: beginning page index
+ * @end: ending page index
+ *
* Wait for writeback to complete against pages indexed by start->end
* inclusive
*/
@@ -276,7 +286,13 @@ int wait_on_page_writeback_range(struct address_space *mapping,
return ret;
}
-/*
+/**
+ * sync_page_range - write and wait on all pages in the passed range
+ * @inode: target inode
+ * @mapping: target address_space
+ * @pos: beginning offset in pages to write
+ * @count: number of bytes to write
+ *
* Write and wait upon all the pages in the passed range. This is a "data
* integrity" operation. It waits upon in-flight writeout before starting and
* waiting upon new writeout. If there was an IO error, return it.
@@ -305,7 +321,13 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
}
EXPORT_SYMBOL(sync_page_range);
-/*
+/**
+ * sync_page_range_nolock
+ * @inode: target inode
+ * @mapping: target address_space
+ * @pos: beginning offset in pages to write
+ * @count: number of bytes to write
+ *
* Note: Holding i_mutex across sync_page_range_nolock is not a good idea
* as it forces O_SYNC writers to different parts of the same file
* to be serialised right until io completion.
@@ -329,10 +351,11 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range_nolock);
/**
- * filemap_fdatawait - walk the list of under-writeback pages of the given
- * address space and wait for all of them.
- *
+ * filemap_fdatawait - wait for all under-writeback pages to complete
* @mapping: address space structure to wait for
+ *
+ * Walk the list of under-writeback pages of the given address space
+ * and wait for all of them.
*/
int filemap_fdatawait(struct address_space *mapping)
{
@@ -368,7 +391,12 @@ int filemap_write_and_wait(struct address_space *mapping)
}
EXPORT_SYMBOL(filemap_write_and_wait);
-/*
+/**
+ * filemap_write_and_wait_range - write out & wait on a file range
+ * @mapping: the address_space for the pages
+ * @lstart: offset in bytes where the range starts
+ * @lend: offset in bytes where the range ends (inclusive)
+ *
* Write out and wait upon file offsets lstart->lend, inclusive.
*
* Note that `lend' is inclusive (describes the last byte to be written) so
@@ -394,8 +422,14 @@ int filemap_write_and_wait_range(struct address_space *mapping,
return err;
}
-/*
- * This function is used to add newly allocated pagecache pages:
+/**
+ * add_to_page_cache - add newly allocated pagecache pages
+ * @page: page to add
+ * @mapping: the page's address_space
+ * @offset: page index
+ * @gfp_mask: page allocation mode
+ *
+ * This function is used to add newly allocated pagecache pages;
* the page is new, so we can just run SetPageLocked() against it.
* The other page state flags were set by rmqueue().
*
@@ -422,7 +456,6 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
}
return error;
}
-
EXPORT_SYMBOL(add_to_page_cache);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
@@ -489,8 +522,7 @@ void fastcall wait_on_page_bit(struct page *page, int bit_nr)
EXPORT_SYMBOL(wait_on_page_bit);
/**
- * unlock_page() - unlock a locked page
- *
+ * unlock_page - unlock a locked page
* @page: the page
*
* Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
@@ -513,8 +545,9 @@ void fastcall unlock_page(struct page *page)
}
EXPORT_SYMBOL(unlock_page);
-/*
- * End writeback against a page.
+/**
+ * end_page_writeback - end writeback against a page
+ * @page: the page
*/
void end_page_writeback(struct page *page)
{
@@ -527,10 +560,11 @@ void end_page_writeback(struct page *page)
}
EXPORT_SYMBOL(end_page_writeback);
-/*
- * Get a lock on the page, assuming we need to sleep to get it.
+/**
+ * __lock_page - get a lock on the page, assuming we need to sleep to get it
+ * @page: the page to lock
*
- * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
+ * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
* random driver's requestfn sets TASK_RUNNING, we could busywait. However
* chances are that on the second loop, the block layer's plug list is empty,
* so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
@@ -544,8 +578,12 @@ void fastcall __lock_page(struct page *page)
}
EXPORT_SYMBOL(__lock_page);
-/*
- * a rather lightweight function, finding and getting a reference to a
+/**
+ * find_get_page - find and get a page reference
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * A rather lightweight function, finding and getting a reference to a
* hashed page atomically.
*/
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
@@ -559,11 +597,14 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
read_unlock_irq(&mapping->tree_lock);
return page;
}
-
EXPORT_SYMBOL(find_get_page);
-/*
- * Same as above, but trylock it instead of incrementing the count.
+/**
+ * find_trylock_page - find and lock a page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Same as find_get_page(), but trylock it instead of incrementing the count.
*/
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
@@ -576,12 +617,10 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offs
read_unlock_irq(&mapping->tree_lock);
return page;
}
-
EXPORT_SYMBOL(find_trylock_page);
/**
* find_lock_page - locate, pin and lock a pagecache page
- *
* @mapping: the address_space to search
* @offset: the page index
*
@@ -617,12 +656,10 @@ repeat:
read_unlock_irq(&mapping->tree_lock);
return page;
}
-
EXPORT_SYMBOL(find_lock_page);
/**
* find_or_create_page - locate or add a pagecache page
- *
* @mapping: the page's address_space
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
@@ -663,7 +700,6 @@ repeat:
page_cache_release(cached_page);
return page;
}
-
EXPORT_SYMBOL(find_or_create_page);
/**
@@ -729,9 +765,16 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
return i;
}
-/*
+/**
+ * find_get_pages_tag - find and return pages that match @tag
+ * @mapping: the address_space to search
+ * @index: the starting page index
+ * @tag: the tag index
+ * @nr_pages: the maximum number of pages
+ * @pages: where the resulting pages are placed
+ *
* Like find_get_pages, except we only return pages which are tagged with
- * `tag'. We update *index to index the next page for the traversal.
+ * @tag. We update @index to index the next page for the traversal.
*/
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages)
@@ -750,7 +793,11 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
return ret;
}
-/*
+/**
+ * grab_cache_page_nowait - returns locked page at given index in given cache
+ * @mapping: target address_space
+ * @index: the page index
+ *
* Same as grab_cache_page, but do not wait if the page is unavailable.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
@@ -779,19 +826,25 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
}
return page;
}
-
EXPORT_SYMBOL(grab_cache_page_nowait);
-/*
+/**
+ * do_generic_mapping_read - generic file read routine
+ * @mapping: address_space to be read
+ * @_ra: file's readahead state
+ * @filp: the file to read
+ * @ppos: current file position
+ * @desc: read_descriptor
+ * @actor: read method
+ *
* This is a generic file read routine, and uses the
- * mapping->a_ops->readpage() function for the actual low-level
- * stuff.
+ * mapping->a_ops->readpage() function for the actual low-level stuff.
*
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
*
- * Note the struct file* is only passed for the use of readpage. It may be
- * NULL.
+ * Note the struct file* is only passed for the use of readpage.
+ * It may be NULL.
*/
void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *_ra,
@@ -1004,7 +1057,6 @@ out:
if (filp)
file_accessed(filp);
}
-
EXPORT_SYMBOL(do_generic_mapping_read);
int file_read_actor(read_descriptor_t *desc, struct page *page,
@@ -1045,7 +1097,13 @@ success:
return size;
}
-/*
+/**
+ * __generic_file_aio_read - generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iov: io vector request
+ * @nr_segs: number of segments in the iovec
+ * @ppos: current file position
+ *
* This is the "read()" routine for all filesystems
* that can use the page cache directly.
*/
@@ -1124,7 +1182,6 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
out:
return retval;
}
-
EXPORT_SYMBOL(__generic_file_aio_read);
ssize_t
@@ -1135,7 +1192,6 @@ generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
BUG_ON(iocb->ki_pos != pos);
return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}
-
EXPORT_SYMBOL(generic_file_aio_read);
ssize_t
@@ -1151,7 +1207,6 @@ generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppo
ret = wait_on_sync_kiocb(&kiocb);
return ret;
}
-
EXPORT_SYMBOL(generic_file_read);
int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
@@ -1192,7 +1247,6 @@ ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
return desc.written;
return desc.error;
}
-
EXPORT_SYMBOL(generic_file_sendfile);
static ssize_t
@@ -1228,11 +1282,15 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
}
#ifdef CONFIG_MMU
-/*
+static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
+/**
+ * page_cache_read - adds requested page to the page cache if not already there
+ * @file: file to read
+ * @offset: page index
+ *
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
-static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
struct address_space *mapping = file->f_mapping;
@@ -1259,7 +1317,12 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)
#define MMAP_LOTSAMISS (100)
-/*
+/**
+ * filemap_nopage - read in file data for page fault handling
+ * @area: the applicable vm_area
+ * @address: target address to read in
+ * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
+ *
* filemap_nopage() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
*
@@ -1462,7 +1525,6 @@ page_not_uptodate:
page_cache_release(page);
return NULL;
}
-
EXPORT_SYMBOL(filemap_nopage);
static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
@@ -1716,7 +1778,13 @@ repeat:
return page;
}
-/*
+/**
+ * read_cache_page - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
* Read into the page cache. If a page already exists,
* and PageUptodate() is not set, try to fill the page.
*/
@@ -1754,7 +1822,6 @@ retry:
out:
return page;
}
-
EXPORT_SYMBOL(read_cache_page);
/*
@@ -1835,7 +1902,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
int copy = min(bytes, iov->iov_len - base);
base = 0;
- left = __copy_from_user_inatomic(vaddr, buf, copy);
+ left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
@@ -1854,7 +1921,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
/*
* Performs necessary checks before doing a write
*
- * Can adjust writing position aor amount of bytes to write.
+ * Can adjust writing position or amount of bytes to write.
* Returns appropriate error code that caller should return or
* zero in case that write should be allowed.
*/
diff --git a/mm/filemap.h b/mm/filemap.h
index 13793ba0ce17..5683cde22055 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -13,7 +13,7 @@
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/config.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
size_t
__filemap_copy_from_user_iovec(char *vaddr,
@@ -34,13 +34,13 @@ filemap_copy_from_user(struct page *page, unsigned long offset,
int left;
kaddr = kmap_atomic(page, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+ left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);
if (left != 0) {
/* Do it the slow way */
kaddr = kmap(page);
- left = __copy_from_user(kaddr + offset, buf, bytes);
+ left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
kunmap(page);
}
return bytes - left;
diff --git a/mm/fremap.c b/mm/fremap.c
index 9f381e58bf44..21b7d0cbc98c 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -83,6 +83,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_add_file_rmap(page);
pte_val = *pte;
update_mmu_cache(vma, addr, pte_val);
+ lazy_mmu_prot_update(pte_val);
err = 0;
unlock:
pte_unmap_unlock(pte, ptl);
@@ -114,7 +115,13 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
pte_val = *pte;
- update_mmu_cache(vma, addr, pte_val);
+ /*
+ * We don't need to run update_mmu_cache() here because the "file pte"
+ * being installed by install_file_pte() is not a real pte - it's a
+ * non-present entry (like a swap entry), noting what file offset should
+ * be mapped there when there's a fault (in a non-linear vma where
+ * that's not obvious).
+ */
pte_unmap_unlock(pte, ptl);
err = 0;
out:
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 832f676ca038..df499973255f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -22,7 +22,7 @@
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-static unsigned long nr_huge_pages, free_huge_pages, reserved_huge_pages;
+static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
@@ -123,39 +123,13 @@ static int alloc_fresh_huge_page(void)
static struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct page *page;
- int use_reserve = 0;
- unsigned long idx;
spin_lock(&hugetlb_lock);
-
- if (vma->vm_flags & VM_MAYSHARE) {
-
- /* idx = radix tree index, i.e. offset into file in
- * HPAGE_SIZE units */
- idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
- + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-
- /* The hugetlbfs specific inode info stores the number
- * of "guaranteed available" (huge) pages. That is,
- * the first 'prereserved_hpages' pages of the inode
- * are either already instantiated, or have been
- * pre-reserved (by hugetlb_reserve_for_inode()). Here
- * we're in the process of instantiating the page, so
- * we use this to determine whether to draw from the
- * pre-reserved pool or the truly free pool. */
- if (idx < HUGETLBFS_I(inode)->prereserved_hpages)
- use_reserve = 1;
- }
-
- if (!use_reserve) {
- if (free_huge_pages <= reserved_huge_pages)
- goto fail;
- } else {
- BUG_ON(reserved_huge_pages == 0);
- reserved_huge_pages--;
- }
+ if (vma->vm_flags & VM_MAYSHARE)
+ resv_huge_pages--;
+ else if (free_huge_pages <= resv_huge_pages)
+ goto fail;
page = dequeue_huge_page(vma, addr);
if (!page)
@@ -165,96 +139,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
set_page_refcounted(page);
return page;
- fail:
- WARN_ON(use_reserve); /* reserved allocations shouldn't fail */
+fail:
spin_unlock(&hugetlb_lock);
return NULL;
}
-/* hugetlb_extend_reservation()
- *
- * Ensure that at least 'atleast' hugepages are, and will remain,
- * available to instantiate the first 'atleast' pages of the given
- * inode. If the inode doesn't already have this many pages reserved
- * or instantiated, set aside some hugepages in the reserved pool to
- * satisfy later faults (or fail now if there aren't enough, rather
- * than getting the SIGBUS later).
- */
-int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atleast)
-{
- struct inode *inode = &info->vfs_inode;
- unsigned long change_in_reserve = 0;
- int ret = 0;
-
- spin_lock(&hugetlb_lock);
- read_lock_irq(&inode->i_mapping->tree_lock);
-
- if (info->prereserved_hpages >= atleast)
- goto out;
-
- /* Because we always call this on shared mappings, none of the
- * pages beyond info->prereserved_hpages can have been
- * instantiated, so we need to reserve all of them now. */
- change_in_reserve = atleast - info->prereserved_hpages;
-
- if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) {
- ret = -ENOMEM;
- goto out;
- }
-
- reserved_huge_pages += change_in_reserve;
- info->prereserved_hpages = atleast;
-
- out:
- read_unlock_irq(&inode->i_mapping->tree_lock);
- spin_unlock(&hugetlb_lock);
-
- return ret;
-}
-
-/* hugetlb_truncate_reservation()
- *
- * This returns pages reserved for the given inode to the general free
- * hugepage pool. If the inode has any pages prereserved, but not
- * instantiated, beyond offset (atmost << HPAGE_SIZE), then release
- * them.
- */
-void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atmost)
-{
- struct inode *inode = &info->vfs_inode;
- struct address_space *mapping = inode->i_mapping;
- unsigned long idx;
- unsigned long change_in_reserve = 0;
- struct page *page;
-
- spin_lock(&hugetlb_lock);
- read_lock_irq(&inode->i_mapping->tree_lock);
-
- if (info->prereserved_hpages <= atmost)
- goto out;
-
- /* Count pages which were reserved, but not instantiated, and
- * which we can now release. */
- for (idx = atmost; idx < info->prereserved_hpages; idx++) {
- page = radix_tree_lookup(&mapping->page_tree, idx);
- if (!page)
- /* Pages which are already instantiated can't
- * be unreserved (and in fact have already
- * been removed from the reserved pool) */
- change_in_reserve++;
- }
-
- BUG_ON(reserved_huge_pages < change_in_reserve);
- reserved_huge_pages -= change_in_reserve;
- info->prereserved_hpages = atmost;
-
- out:
- read_unlock_irq(&inode->i_mapping->tree_lock);
- spin_unlock(&hugetlb_lock);
-}
-
static int __init hugetlb_init(void)
{
unsigned long i;
@@ -334,7 +223,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
return nr_huge_pages;
spin_lock(&hugetlb_lock);
- count = max(count, reserved_huge_pages);
+ count = max(count, resv_huge_pages);
try_to_free_low(count);
while (count < nr_huge_pages) {
struct page *page = dequeue_huge_page(NULL, 0);
@@ -361,11 +250,11 @@ int hugetlb_report_meminfo(char *buf)
return sprintf(buf,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
- "HugePages_Rsvd: %5lu\n"
+ "HugePages_Rsvd: %5lu\n"
"Hugepagesize: %5lu kB\n",
nr_huge_pages,
free_huge_pages,
- reserved_huge_pages,
+ resv_huge_pages,
HPAGE_SIZE/1024);
}
@@ -754,3 +643,156 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
flush_tlb_range(vma, start, end);
}
+struct file_region {
+ struct list_head link;
+ long from;
+ long to;
+};
+
+static long region_add(struct list_head *head, long f, long t)
+{
+ struct file_region *rg, *nrg, *trg;
+
+ /* Locate the region we are either in or before. */
+ list_for_each_entry(rg, head, link)
+ if (f <= rg->to)
+ break;
+
+ /* Round our left edge to the current segment if it encloses us. */
+ if (f > rg->from)
+ f = rg->from;
+
+ /* Check for and consume any regions we now overlap with. */
+ nrg = rg;
+ list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+ if (&rg->link == head)
+ break;
+ if (rg->from > t)
+ break;
+
+ /* If this area reaches higher, then extend our area to
+ * include it completely. If this is not the first area
+ * which we intend to reuse, free it. */
+ if (rg->to > t)
+ t = rg->to;
+ if (rg != nrg) {
+ list_del(&rg->link);
+ kfree(rg);
+ }
+ }
+ nrg->from = f;
+ nrg->to = t;
+ return 0;
+}
+
+static long region_chg(struct list_head *head, long f, long t)
+{
+ struct file_region *rg, *nrg;
+ long chg = 0;
+
+ /* Locate the region we are before or in. */
+ list_for_each_entry(rg, head, link)
+ if (f <= rg->to)
+ break;
+
+ /* If we are below the current region, then a new region is required.
+ * Subtle: allocate a new region at the position but make it zero
+ * size such that we can guarantee to record the reservation. */
+ if (&rg->link == head || t < rg->from) {
+ nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+ if (nrg == 0)
+ return -ENOMEM;
+ nrg->from = f;
+ nrg->to = f;
+ INIT_LIST_HEAD(&nrg->link);
+ list_add(&nrg->link, rg->link.prev);
+
+ return t - f;
+ }
+
+ /* Round our left edge to the current segment if it encloses us. */
+ if (f > rg->from)
+ f = rg->from;
+ chg = t - f;
+
+ /* Check for and consume any regions we now overlap with. */
+ list_for_each_entry(rg, rg->link.prev, link) {
+ if (&rg->link == head)
+ break;
+ if (rg->from > t)
+ return chg;
+
+ /* We overlap with this area; if it extends further than
+ * us, then we must extend ourselves. Account for its
+ * existing reservation. */
+ if (rg->to > t) {
+ chg += rg->to - t;
+ t = rg->to;
+ }
+ chg -= rg->to - rg->from;
+ }
+ return chg;
+}
+
+static long region_truncate(struct list_head *head, long end)
+{
+ struct file_region *rg, *trg;
+ long chg = 0;
+
+ /* Locate the region we are either in or before. */
+ list_for_each_entry(rg, head, link)
+ if (end <= rg->to)
+ break;
+ if (&rg->link == head)
+ return 0;
+
+ /* If we are in the middle of a region then adjust it. */
+ if (end > rg->from) {
+ chg = rg->to - end;
+ rg->to = end;
+ rg = list_entry(rg->link.next, typeof(*rg), link);
+ }
+
+ /* Drop any remaining regions. */
+ list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+ if (&rg->link == head)
+ break;
+ chg += rg->to - rg->from;
+ list_del(&rg->link);
+ kfree(rg);
+ }
+ return chg;
+}
+
+static int hugetlb_acct_memory(long delta)
+{
+ int ret = -ENOMEM;
+
+ spin_lock(&hugetlb_lock);
+ if ((delta + resv_huge_pages) <= free_huge_pages) {
+ resv_huge_pages += delta;
+ ret = 0;
+ }
+ spin_unlock(&hugetlb_lock);
+ return ret;
+}
+
+int hugetlb_reserve_pages(struct inode *inode, long from, long to)
+{
+ long ret, chg;
+
+ chg = region_chg(&inode->i_mapping->private_list, from, to);
+ if (chg < 0)
+ return chg;
+ ret = hugetlb_acct_memory(chg);
+ if (ret < 0)
+ return ret;
+ region_add(&inode->i_mapping->private_list, from, to);
+ return 0;
+}
+
+void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+{
+ long chg = region_truncate(&inode->i_mapping->private_list, offset);
+ hugetlb_acct_memory(freed - chg);
+}
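
The region_* helpers above keep a sorted list of [from, to) huge-page reservations per hugetlbfs inode: region_chg() reports how many pages in the requested range are not yet covered, hugetlb_acct_memory() charges that many against the free pool (bumping resv_huge_pages), and region_add() then merges the range into the list. A worked example with made-up numbers:

	/*
	 * Units are huge pages; the numbers are illustrative only.
	 *
	 *   existing list:  [0,4)  [10,12)        6 pages already reserved
	 *   new request:    hugetlb_reserve_pages(inode, 2, 11)
	 *
	 *   region_chg(head, 2, 11)  = 6          [0,12) spans 12 pages and
	 *                                         6 of them are already covered
	 *   hugetlb_acct_memory(6)                resv_huge_pages += 6, or -ENOMEM
	 *                                         if the free pool cannot cover it
	 *   region_add(head, 2, 11)               list collapses to the single
	 *                                         region [0,12)
	 */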
diff --git a/mm/memory.c b/mm/memory.c
index 0ec7bc644271..247b5c312b9b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -434,7 +434,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
if (!pte_file(pte)) {
- swap_duplicate(pte_to_swp_entry(pte));
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ swap_duplicate(entry);
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
@@ -443,6 +445,16 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
+ if (is_write_migration_entry(entry) &&
+ is_cow_mapping(vm_flags)) {
+ /*
+ * COW mappings require pages in both parent
+ * and child to be set to read.
+ */
+ make_migration_entry_read(&entry);
+ pte = swp_entry_to_pte(entry);
+ set_pte_at(src_mm, addr, src_pte, pte);
+ }
}
goto out_set_pte;
}
@@ -1445,25 +1457,60 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct page *old_page, *new_page;
pte_t entry;
- int ret = VM_FAULT_MINOR;
+ int reuse, ret = VM_FAULT_MINOR;
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page)
goto gotten;
- if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
- int reuse = can_share_swap_page(old_page);
- unlock_page(old_page);
- if (reuse) {
- flush_cache_page(vma, address, pte_pfn(orig_pte));
- entry = pte_mkyoung(orig_pte);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- ptep_set_access_flags(vma, address, page_table, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
- ret |= VM_FAULT_WRITE;
- goto unlock;
+ if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
+ (VM_SHARED|VM_WRITE))) {
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+ /*
+ * Notify the address space that the page is about to
+ * become writable so that it can prohibit this or wait
+ * for the page to get into an appropriate state.
+ *
+ * We do this without the lock held, so that it can
+ * sleep if it needs to.
+ */
+ page_cache_get(old_page);
+ pte_unmap_unlock(page_table, ptl);
+
+ if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+ goto unwritable_page;
+
+ page_cache_release(old_page);
+
+ /*
+ * Since we dropped the lock we need to revalidate
+ * the PTE as someone else may have changed it. If
+ * they did, we just return, as we can count on the
+ * MMU to tell us if they didn't also make it writable.
+ */
+ page_table = pte_offset_map_lock(mm, pmd, address,
+ &ptl);
+ if (!pte_same(*page_table, orig_pte))
+ goto unlock;
}
+
+ reuse = 1;
+ } else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
+ reuse = can_share_swap_page(old_page);
+ unlock_page(old_page);
+ } else {
+ reuse = 0;
+ }
+
+ if (reuse) {
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
+ entry = pte_mkyoung(orig_pte);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ ptep_set_access_flags(vma, address, page_table, entry, 1);
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ ret |= VM_FAULT_WRITE;
+ goto unlock;
}
/*
@@ -1523,6 +1570,10 @@ oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
+
+unwritable_page:
+ page_cache_release(old_page);
+ return VM_FAULT_SIGBUS;
}
/*
@@ -1879,7 +1930,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
entry = pte_to_swp_entry(orig_pte);
-again:
+ if (is_migration_entry(entry)) {
+ migration_entry_wait(mm, pmd, address);
+ goto out;
+ }
page = lookup_swap_cache(entry);
if (!page) {
swapin_readahead(entry, address, vma);
@@ -1903,12 +1957,6 @@ again:
mark_page_accessed(page);
lock_page(page);
- if (!PageSwapCache(page)) {
- /* Page migration has occured */
- unlock_page(page);
- page_cache_release(page);
- goto again;
- }
/*
* Back out if somebody else already faulted in this pte.
@@ -2074,18 +2122,31 @@ retry:
/*
* Should we do an early C-O-W break?
*/
- if (write_access && !(vma->vm_flags & VM_SHARED)) {
- struct page *page;
+ if (write_access) {
+ if (!(vma->vm_flags & VM_SHARED)) {
+ struct page *page;
- if (unlikely(anon_vma_prepare(vma)))
- goto oom;
- page = alloc_page_vma(GFP_HIGHUSER, vma, address);
- if (!page)
- goto oom;
- copy_user_highpage(page, new_page, address);
- page_cache_release(new_page);
- new_page = page;
- anon = 1;
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ if (!page)
+ goto oom;
+ copy_user_highpage(page, new_page, address);
+ page_cache_release(new_page);
+ new_page = page;
+ anon = 1;
+
+ } else {
+ /* if the page will be shareable, see if the backing
+ * address space wants to know that the page is about
+ * to become writable */
+ if (vma->vm_ops->page_mkwrite &&
+ vma->vm_ops->page_mkwrite(vma, new_page) < 0
+ ) {
+ page_cache_release(new_page);
+ return VM_FAULT_SIGBUS;
+ }
+ }
}
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
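The do_wp_page() and do_no_page() hunks above only consult ->page_mkwrite() when the backing store registers one; the callback takes (vma, page) and any negative return makes the write fault fail with SIGBUS. A minimal sketch of such a handler, with made-up example_* names and assuming the usual filemap_nopage() fault path, might look like this (illustrative only, not code from this patch):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical handler: refuse the write if the page dropped out of the
 * mapping while the fault path slept, otherwise dirty it so the backing
 * store knows a modification is coming. */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	if (!page->mapping)
		return -EINVAL;		/* do_wp_page() turns this into SIGBUS */
	set_page_dirty(page);
	return 0;
}

static struct vm_operations_struct example_vm_ops = {
	.nopage		= filemap_nopage,
	.page_mkwrite	= example_page_mkwrite,
};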
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 70df5c0d957e..841a077d5aeb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,7 +26,7 @@
extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
unsigned long size);
-static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
struct pglist_data *pgdat = zone->zone_pgdat;
int nr_pages = PAGES_PER_SECTION;
@@ -34,8 +34,15 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
int zone_type;
zone_type = zone - pgdat->node_zones;
+ if (!populated_zone(zone)) {
+ int ret = 0;
+ ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+ if (ret < 0)
+ return ret;
+ }
memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
+ return 0;
}
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
@@ -50,7 +57,11 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
if (ret < 0)
return ret;
- __add_zone(zone, phys_start_pfn);
+ ret = __add_zone(zone, phys_start_pfn);
+
+ if (ret < 0)
+ return ret;
+
return register_new_memory(__pfn_to_section(phys_start_pfn));
}
@@ -116,6 +127,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
unsigned long flags;
unsigned long onlined_pages = 0;
struct zone *zone;
+ int need_zonelists_rebuild = 0;
/*
* This doesn't need a lock to do pfn_to_page().
@@ -128,6 +140,14 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
pgdat_resize_unlock(zone->zone_pgdat, &flags);
+ /*
+ * If this zone is not populated, then it is not in zonelist.
+ * This means the page allocator ignores this zone.
+ * So, zonelist must be updated after online.
+ */
+ if (!populated_zone(zone))
+ need_zonelists_rebuild = 1;
+
for (i = 0; i < nr_pages; i++) {
struct page *page = pfn_to_page(pfn + i);
online_page(page);
@@ -138,5 +158,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
setup_per_zone_pages_min();
+ if (need_zonelists_rebuild)
+ build_all_zonelists();
+ vm_total_pages = nr_free_pagecache_pages();
return 0;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8778f58880c4..ec4a1a950df9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -87,6 +87,8 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
+#include <linux/rmap.h>
+#include <linux/security.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -587,6 +589,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
isolate_lru_page(page, pagelist);
}
+static struct page *new_node_page(struct page *page, unsigned long node, int **x)
+{
+ return alloc_pages_node(node, GFP_HIGHUSER, 0);
+}
+
/*
* Migrate pages from one node to a target node.
* Returns error or the number of pages not migrated.
@@ -603,11 +610,9 @@ int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
- if (!list_empty(&pagelist)) {
- err = migrate_pages_to(&pagelist, NULL, dest);
- if (!list_empty(&pagelist))
- putback_lru_pages(&pagelist);
- }
+ if (!list_empty(&pagelist))
+ err = migrate_pages(&pagelist, new_node_page, dest);
+
return err;
}
@@ -694,6 +699,12 @@ int do_migrate_pages(struct mm_struct *mm,
}
+static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+{
+ struct vm_area_struct *vma = (struct vm_area_struct *)private;
+
+ return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+}
#else
static void migrate_page_add(struct page *page, struct list_head *pagelist,
@@ -706,6 +717,11 @@ int do_migrate_pages(struct mm_struct *mm,
{
return -ENOSYS;
}
+
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+ return NULL;
+}
#endif
long do_mbind(unsigned long start, unsigned long len,
@@ -767,15 +783,13 @@ long do_mbind(unsigned long start, unsigned long len,
err = mbind_range(vma, start, end, new);
if (!list_empty(&pagelist))
- nr_failed = migrate_pages_to(&pagelist, vma, -1);
+ nr_failed = migrate_pages(&pagelist, new_vma_page,
+ (unsigned long)vma);
if (!err && nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
}
- if (!list_empty(&pagelist))
- putback_lru_pages(&pagelist);
-
up_write(&mm->mmap_sem);
mpol_free(new);
return err;
@@ -929,6 +943,10 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
goto out;
}
+ err = security_task_movememory(task);
+ if (err)
+ goto out;
+
err = do_migrate_pages(mm, &old, &new,
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
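The two small allocators added above follow the callback convention the reworked migrate_pages() expects: each isolated page and the caller's private value are passed in, and whatever page comes back is used as the migration target. A hedged sketch of another caller of that interface, with made-up names, purely to show the shape of the contract:

#include <linux/migrate.h>
#include <linux/gfp.h>
#include <linux/list.h>

/* Hypothetical allocator: always place migrated pages on node 0. */
static struct page *alloc_on_node0(struct page *page, unsigned long private,
				   int **result)
{
	return alloc_pages_node(0, GFP_HIGHUSER, 0);
}

/* Hypothetical wrapper around a list built with isolate_lru_page(). */
static int example_migrate_to_node0(struct list_head *pagelist)
{
	if (list_empty(pagelist))
		return 0;
	/* Returns the number of pages that could not be migrated. */
	return migrate_pages(pagelist, alloc_on_node0, 0);
}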
diff --git a/mm/migrate.c b/mm/migrate.c
index 1c25040693d2..1c2a71aa05cd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -15,6 +15,7 @@
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
+#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
@@ -23,13 +24,13 @@
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
-#include <linux/swapops.h>
+#include <linux/writeback.h>
+#include <linux/mempolicy.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
#include "internal.h"
-/* The maximum number of pages to take off the LRU for migration */
-#define MIGRATE_CHUNK_SIZE 256
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
/*
@@ -64,16 +65,11 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
}
/*
- * migrate_prep() needs to be called after we have compiled the list of pages
- * to be migrated using isolate_lru_page() but before we begin a series of calls
- * to migrate_pages().
+ * migrate_prep() needs to be called before we start compiling a list of pages
+ * to be migrated using isolate_lru_page().
*/
int migrate_prep(void)
{
- /* Must have swap device for migration */
- if (nr_swap_pages <= 0)
- return -ENODEV;
-
/*
* Clear the LRU lists so pages can be isolated.
* Note that pages may be moved off the LRU after we have
@@ -87,7 +83,6 @@ int migrate_prep(void)
static inline void move_to_lru(struct page *page)
{
- list_del(&page->lru);
if (PageActive(page)) {
/*
* lru_cache_add_active checks that
@@ -113,113 +108,200 @@ int putback_lru_pages(struct list_head *l)
int count = 0;
list_for_each_entry_safe(page, page2, l, lru) {
+ list_del(&page->lru);
move_to_lru(page);
count++;
}
return count;
}
-/*
- * Non migratable page
- */
-int fail_migrate_page(struct page *newpage, struct page *page)
+static inline int is_swap_pte(pte_t pte)
{
- return -EIO;
+ return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
}
-EXPORT_SYMBOL(fail_migrate_page);
/*
- * swapout a single page
- * page is locked upon entry, unlocked on exit
+ * Restore a potential migration pte to a working pte entry
*/
-static int swap_page(struct page *page)
+static void remove_migration_pte(struct vm_area_struct *vma,
+ struct page *old, struct page *new)
{
- struct address_space *mapping = page_mapping(page);
+ struct mm_struct *mm = vma->vm_mm;
+ swp_entry_t entry;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+ unsigned long addr = page_address_in_vma(new, vma);
+
+ if (addr == -EFAULT)
+ return;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ return;
+
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ return;
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ return;
+
+ ptep = pte_offset_map(pmd, addr);
+
+ if (!is_swap_pte(*ptep)) {
+ pte_unmap(ptep);
+ return;
+ }
- if (page_mapped(page) && mapping)
- if (try_to_unmap(page, 1) != SWAP_SUCCESS)
- goto unlock_retry;
+ ptl = pte_lockptr(mm, pmd);
+ spin_lock(ptl);
+ pte = *ptep;
+ if (!is_swap_pte(pte))
+ goto out;
- if (PageDirty(page)) {
- /* Page is dirty, try to write it out here */
- switch(pageout(page, mapping)) {
- case PAGE_KEEP:
- case PAGE_ACTIVATE:
- goto unlock_retry;
+ entry = pte_to_swp_entry(pte);
- case PAGE_SUCCESS:
- goto retry;
+ if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
+ goto out;
- case PAGE_CLEAN:
- ; /* try to free the page below */
- }
- }
+ get_page(new);
+ pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ if (is_write_migration_entry(entry))
+ pte = pte_mkwrite(pte);
+ set_pte_at(mm, addr, ptep, pte);
- if (PagePrivate(page)) {
- if (!try_to_release_page(page, GFP_KERNEL) ||
- (!mapping && page_count(page) == 1))
- goto unlock_retry;
- }
+ if (PageAnon(new))
+ page_add_anon_rmap(new, vma, addr);
+ else
+ page_add_file_rmap(new);
- if (remove_mapping(mapping, page)) {
- /* Success */
- unlock_page(page);
- return 0;
- }
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, addr, pte);
+ lazy_mmu_prot_update(pte);
-unlock_retry:
- unlock_page(page);
+out:
+ pte_unmap_unlock(ptep, ptl);
+}
-retry:
- return -EAGAIN;
+/*
+ * Note that remove_file_migration_ptes will only work on regular mappings;
+ * nonlinear mappings do not use migration entries.
+ */
+static void remove_file_migration_ptes(struct page *old, struct page *new)
+{
+ struct vm_area_struct *vma;
+ struct address_space *mapping = page_mapping(new);
+ struct prio_tree_iter iter;
+ pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ if (!mapping)
+ return;
+
+ spin_lock(&mapping->i_mmap_lock);
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
+ remove_migration_pte(vma, old, new);
+
+ spin_unlock(&mapping->i_mmap_lock);
}
/*
- * Remove references for a page and establish the new page with the correct
- * basic settings to be able to stop accesses to the page.
+ * Must hold mmap_sem lock on at least one of the vmas containing
+ * the page so that the anon_vma cannot vanish.
*/
-int migrate_page_remove_references(struct page *newpage,
- struct page *page, int nr_refs)
+static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
- struct address_space *mapping = page_mapping(page);
- struct page **radix_pointer;
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+ unsigned long mapping;
- /*
- * Avoid doing any of the following work if the page count
- * indicates that the page is in use or truncate has removed
- * the page.
- */
- if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
- return -EAGAIN;
+ mapping = (unsigned long)new->mapping;
- /*
- * Establish swap ptes for anonymous pages or destroy pte
- * maps for files.
- *
- * In order to reestablish file backed mappings the fault handlers
- * will take the radix tree_lock which may then be used to stop
- * processses from accessing this page until the new page is ready.
- *
- * A process accessing via a swap pte (an anonymous page) will take a
- * page_lock on the old page which will block the process until the
- * migration attempt is complete. At that time the PageSwapCache bit
- * will be examined. If the page was migrated then the PageSwapCache
- * bit will be clear and the operation to retrieve the page will be
- * retried which will find the new page in the radix tree. Then a new
- * direct mapping may be generated based on the radix tree contents.
- *
- * If the page was not migrated then the PageSwapCache bit
- * is still set and the operation may continue.
- */
- if (try_to_unmap(page, 1) == SWAP_FAIL)
- /* A vma has VM_LOCKED set -> permanent failure */
- return -EPERM;
+ if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
+ return;
/*
- * Give up if we were unable to remove all mappings.
+ * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
*/
- if (page_mapcount(page))
- return -EAGAIN;
+ anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
+ spin_lock(&anon_vma->lock);
+
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+ remove_migration_pte(vma, old, new);
+
+ spin_unlock(&anon_vma->lock);
+}
+
+/*
+ * Get rid of all migration entries and replace them by
+ * references to the indicated page.
+ */
+static void remove_migration_ptes(struct page *old, struct page *new)
+{
+ if (PageAnon(new))
+ remove_anon_migration_ptes(old, new);
+ else
+ remove_file_migration_ptes(old, new);
+}
+
+/*
+ * Something used the pte of a page under migration. We need to
+ * get to the page and wait until migration is finished.
+ * When we return from this function the fault will be retried.
+ *
+ * This function is called from do_swap_page().
+ */
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+ swp_entry_t entry;
+ struct page *page;
+
+ ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+ pte = *ptep;
+ if (!is_swap_pte(pte))
+ goto out;
+
+ entry = pte_to_swp_entry(pte);
+ if (!is_migration_entry(entry))
+ goto out;
+
+ page = migration_entry_to_page(entry);
+
+ get_page(page);
+ pte_unmap_unlock(ptep, ptl);
+ wait_on_page_locked(page);
+ put_page(page);
+ return;
+out:
+ pte_unmap_unlock(ptep, ptl);
+}
+
+/*
+ * Replace the page in the mapping.
+ *
+ * The number of remaining references must be:
+ * 1 for anonymous pages without a mapping
+ * 2 for pages with a mapping
+ * 3 for pages with a mapping and PagePrivate set.
+ */
+static int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ struct page **radix_pointer;
+
+ if (!mapping) {
+ /* Anonymous page */
+ if (page_count(page) != 1)
+ return -EAGAIN;
+ return 0;
+ }
write_lock_irq(&mapping->tree_lock);
@@ -227,7 +309,7 @@ int migrate_page_remove_references(struct page *newpage,
&mapping->page_tree,
page_index(page));
- if (!page_mapping(page) || page_count(page) != nr_refs ||
+ if (page_count(page) != 2 + !!PagePrivate(page) ||
*radix_pointer != page) {
write_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
@@ -235,19 +317,14 @@ int migrate_page_remove_references(struct page *newpage,
/*
* Now we know that no one else is looking at the page.
- *
- * Certain minimal information about a page must be available
- * in order for other subsystems to properly handle the page if they
- * find it through the radix tree update before we are finished
- * copying the page.
*/
get_page(newpage);
- newpage->index = page->index;
- newpage->mapping = page->mapping;
+#ifdef CONFIG_SWAP
if (PageSwapCache(page)) {
SetPageSwapCache(newpage);
set_page_private(newpage, page_private(page));
}
+#endif
*radix_pointer = newpage;
__put_page(page);
@@ -255,12 +332,11 @@ int migrate_page_remove_references(struct page *newpage,
return 0;
}
-EXPORT_SYMBOL(migrate_page_remove_references);
/*
* Copy the page to its new location
*/
-void migrate_page_copy(struct page *newpage, struct page *page)
+static void migrate_page_copy(struct page *newpage, struct page *page)
{
copy_highpage(newpage, page);
@@ -282,7 +358,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
set_page_dirty(newpage);
}
+#ifdef CONFIG_SWAP
ClearPageSwapCache(page);
+#endif
ClearPageActive(page);
ClearPagePrivate(page);
set_page_private(page, 0);
@@ -295,7 +373,18 @@ void migrate_page_copy(struct page *newpage, struct page *page)
if (PageWriteback(newpage))
end_page_writeback(newpage);
}
-EXPORT_SYMBOL(migrate_page_copy);
+
+/************************************************************
+ * Migration functions
+ ***********************************************************/
+
+/* Always fail migration. Used for mappings that are not movable */
+int fail_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ return -EIO;
+}
+EXPORT_SYMBOL(fail_migrate_page);
/*
* Common logic to directly migrate a single page suitable for
@@ -303,51 +392,286 @@ EXPORT_SYMBOL(migrate_page_copy);
*
* Pages are locked upon entry and exit.
*/
-int migrate_page(struct page *newpage, struct page *page)
+int migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
{
int rc;
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_remove_references(newpage, page, 2);
+ rc = migrate_page_move_mapping(mapping, newpage, page);
+
+ if (rc)
+ return rc;
+
+ migrate_page_copy(newpage, page);
+ return 0;
+}
+EXPORT_SYMBOL(migrate_page);
+
+/*
+ * Migration function for pages with buffers. This function can only be used
+ * if the underlying filesystem guarantees that no other references to "page"
+ * exist.
+ */
+int buffer_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ struct buffer_head *bh, *head;
+ int rc;
+
+ if (!page_has_buffers(page))
+ return migrate_page(mapping, newpage, page);
+
+ head = page_buffers(page);
+
+ rc = migrate_page_move_mapping(mapping, newpage, page);
if (rc)
return rc;
+ bh = head;
+ do {
+ get_bh(bh);
+ lock_buffer(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ ClearPagePrivate(page);
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ put_page(page);
+ get_page(newpage);
+
+ bh = head;
+ do {
+ set_bh_page(bh, newpage, bh_offset(bh));
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ SetPagePrivate(newpage);
+
migrate_page_copy(newpage, page);
+ bh = head;
+ do {
+ unlock_buffer(bh);
+ put_bh(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ return 0;
+}
+EXPORT_SYMBOL(buffer_migrate_page);
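/*
 * Editorial sketch, not part of the patch: with the widened prototype a
 * block-based filesystem can publish the helper above through its
 * address_space_operations so the VM can migrate its buffer-backed
 * pagecache.  "example_aops" is a made-up name and every other method
 * is omitted for brevity.
 */
static struct address_space_operations example_aops = {
	.migratepage	= buffer_migrate_page,
};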
+
+/*
+ * Writeback a page to clean the dirty state
+ */
+static int writeout(struct address_space *mapping, struct page *page)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = 1,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .nonblocking = 1,
+ .for_reclaim = 1
+ };
+ int rc;
+
+ if (!mapping->a_ops->writepage)
+ /* No write method for the address space */
+ return -EINVAL;
+
+ if (!clear_page_dirty_for_io(page))
+ /* Someone else already triggered a write */
+ return -EAGAIN;
+
/*
- * Remove auxiliary swap entries and replace
- * them with real ptes.
- *
- * Note that a real pte entry will allow processes that are not
- * waiting on the page lock to use the new page via the page tables
- * before the new page is unlocked.
+ * A dirty page may imply that the underlying filesystem has
+ * the page on some queue. So the page must be clean for
+	 * migration. Writeout may mean we lose the lock and the
+ * page state is no longer what we checked for earlier.
+ * At this point we know that the migration attempt cannot
+ * be successful.
*/
- remove_from_swap(newpage);
- return 0;
+ remove_migration_ptes(page, page);
+
+ rc = mapping->a_ops->writepage(page, &wbc);
+ if (rc < 0)
+ /* I/O Error writing */
+ return -EIO;
+
+ if (rc != AOP_WRITEPAGE_ACTIVATE)
+ /* unlocked. Relock */
+ lock_page(page);
+
+ return -EAGAIN;
+}
+
+/*
+ * Default handling if a filesystem does not provide a migration function.
+ */
+static int fallback_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ if (PageDirty(page))
+ return writeout(mapping, page);
+
+ /*
+ * Buffers may be managed in a filesystem specific way.
+ * We must have no buffers or drop them.
+ */
+ if (page_has_buffers(page) &&
+ !try_to_release_page(page, GFP_KERNEL))
+ return -EAGAIN;
+
+ return migrate_page(mapping, newpage, page);
+}
+
+/*
+ * Move a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
+ *
+ * The new page will have replaced the old page if this function
+ * is successful.
+ */
+static int move_to_new_page(struct page *newpage, struct page *page)
+{
+ struct address_space *mapping;
+ int rc;
+
+ /*
+ * Block others from accessing the page when we get around to
+ * establishing additional references. We are the only one
+ * holding a reference to the new page at this point.
+ */
+ if (TestSetPageLocked(newpage))
+ BUG();
+
+ /* Prepare mapping for the new page.*/
+ newpage->index = page->index;
+ newpage->mapping = page->mapping;
+
+ mapping = page_mapping(page);
+ if (!mapping)
+ rc = migrate_page(mapping, newpage, page);
+ else if (mapping->a_ops->migratepage)
+ /*
+ * Most pages have a mapping and most filesystems
+ * should provide a migration function. Anonymous
+ * pages are part of swap space which also has its
+ * own migration function. This is the most common
+ * path for page migration.
+ */
+ rc = mapping->a_ops->migratepage(mapping,
+ newpage, page);
+ else
+ rc = fallback_migrate_page(mapping, newpage, page);
+
+ if (!rc)
+ remove_migration_ptes(page, newpage);
+ else
+ newpage->mapping = NULL;
+
+ unlock_page(newpage);
+
+ return rc;
+}
+
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+ struct page *page, int force)
+{
+ int rc = 0;
+ int *result = NULL;
+ struct page *newpage = get_new_page(page, private, &result);
+
+ if (!newpage)
+ return -ENOMEM;
+
+ if (page_count(page) == 1)
+ /* page was freed from under us. So we are done. */
+ goto move_newpage;
+
+ rc = -EAGAIN;
+ if (TestSetPageLocked(page)) {
+ if (!force)
+ goto move_newpage;
+ lock_page(page);
+ }
+
+ if (PageWriteback(page)) {
+ if (!force)
+ goto unlock;
+ wait_on_page_writeback(page);
+ }
+
+ /*
+ * Establish migration ptes or remove ptes
+ */
+ if (try_to_unmap(page, 1) != SWAP_FAIL) {
+ if (!page_mapped(page))
+ rc = move_to_new_page(newpage, page);
+ } else
+ /* A vma has VM_LOCKED set -> permanent failure */
+ rc = -EPERM;
+
+ if (rc)
+ remove_migration_ptes(page, page);
+unlock:
+ unlock_page(page);
+
+ if (rc != -EAGAIN) {
+ /*
+ * A page that has been migrated has all references
+ * removed and will be freed. A page that has not been
+		 * migrated will have kept its references and be
+ * restored.
+ */
+ list_del(&page->lru);
+ move_to_lru(page);
+ }
+
+move_newpage:
+ /*
+ * Move the new page to the LRU. If migration was not successful
+ * then this will free the page.
+ */
+ move_to_lru(newpage);
+ if (result) {
+ if (rc)
+ *result = rc;
+ else
+ *result = page_to_nid(newpage);
+ }
+ return rc;
}
-EXPORT_SYMBOL(migrate_page);
/*
* migrate_pages
*
- * Two lists are passed to this function. The first list
- * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
- * can be moved to. If the second list is NULL then all
- * pages are swapped out.
+ * The function takes one list of pages to migrate and an allocation
+ * callback that, given a page to be migrated and the caller's private
+ * data, returns a newly allocated page to migrate it to.
*
* The function returns after 10 attempts or if no pages
* are movable anymore because to has become empty
- * or no retryable pages exist anymore.
+ * or no retryable pages exist anymore. All pages will be
+ * returned to the LRU or freed.
*
- * Return: Number of pages not migrated when "to" ran empty.
+ * Return: Number of pages not migrated or error code.
*/
-int migrate_pages(struct list_head *from, struct list_head *to,
- struct list_head *moved, struct list_head *failed)
+int migrate_pages(struct list_head *from,
+ new_page_t get_new_page, unsigned long private)
{
- int retry;
+ int retry = 1;
int nr_failed = 0;
int pass = 0;
struct page *page;
@@ -358,305 +682,297 @@ int migrate_pages(struct list_head *from, struct list_head *to,
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
-redo:
- retry = 0;
+ for(pass = 0; pass < 10 && retry; pass++) {
+ retry = 0;
+
+ list_for_each_entry_safe(page, page2, from, lru) {
+ cond_resched();
+
+ rc = unmap_and_move(get_new_page, private,
+ page, pass > 2);
+
+ switch(rc) {
+ case -ENOMEM:
+ goto out;
+ case -EAGAIN:
+ retry++;
+ break;
+ case 0:
+ break;
+ default:
+ /* Permanent failure */
+ nr_failed++;
+ break;
+ }
+ }
+ }
+ rc = 0;
+out:
+ if (!swapwrite)
+ current->flags &= ~PF_SWAPWRITE;
- list_for_each_entry_safe(page, page2, from, lru) {
- struct page *newpage = NULL;
- struct address_space *mapping;
+ putback_lru_pages(from);
- cond_resched();
+ if (rc)
+ return rc;
- rc = 0;
- if (page_count(page) == 1)
- /* page was freed from under us. So we are done. */
- goto next;
+ return nr_failed + retry;
+}
- if (to && list_empty(to))
- break;
+#ifdef CONFIG_NUMA
+/*
+ * Move a list of individual pages
+ */
+struct page_to_node {
+ unsigned long addr;
+ struct page *page;
+ int node;
+ int status;
+};
- /*
- * Skip locked pages during the first two passes to give the
- * functions holding the lock time to release the page. Later we
- * use lock_page() to have a higher chance of acquiring the
- * lock.
- */
- rc = -EAGAIN;
- if (pass > 2)
- lock_page(page);
- else
- if (TestSetPageLocked(page))
- goto next;
+static struct page *new_page_node(struct page *p, unsigned long private,
+ int **result)
+{
+ struct page_to_node *pm = (struct page_to_node *)private;
- /*
- * Only wait on writeback if we have already done a pass where
- * we we may have triggered writeouts for lots of pages.
- */
- if (pass > 0) {
- wait_on_page_writeback(page);
- } else {
- if (PageWriteback(page))
- goto unlock_page;
- }
+ while (pm->node != MAX_NUMNODES && pm->page != p)
+ pm++;
- /*
- * Anonymous pages must have swap cache references otherwise
- * the information contained in the page maps cannot be
- * preserved.
- */
- if (PageAnon(page) && !PageSwapCache(page)) {
- if (!add_to_swap(page, GFP_KERNEL)) {
- rc = -ENOMEM;
- goto unlock_page;
- }
- }
+ if (pm->node == MAX_NUMNODES)
+ return NULL;
- if (!to) {
- rc = swap_page(page);
- goto next;
- }
+ *result = &pm->status;
- newpage = lru_to_page(to);
- lock_page(newpage);
+ return alloc_pages_node(pm->node, GFP_HIGHUSER, 0);
+}
- /*
- * Pages are properly locked and writeback is complete.
- * Try to migrate the page.
- */
- mapping = page_mapping(page);
- if (!mapping)
- goto unlock_both;
+/*
+ * Move a set of pages as indicated in the pm array. The addr
+ * field must be set to the virtual address of the page to be moved
+ * and the node number must contain a valid target node.
+ */
+static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
+ int migrate_all)
+{
+ int err;
+ struct page_to_node *pp;
+ LIST_HEAD(pagelist);
- if (mapping->a_ops->migratepage) {
- /*
- * Most pages have a mapping and most filesystems
- * should provide a migration function. Anonymous
- * pages are part of swap space which also has its
- * own migration function. This is the most common
- * path for page migration.
- */
- rc = mapping->a_ops->migratepage(newpage, page);
- goto unlock_both;
- }
-
- /* Make sure the dirty bit is up to date */
- if (try_to_unmap(page, 1) == SWAP_FAIL) {
- rc = -EPERM;
- goto unlock_both;
- }
+ down_read(&mm->mmap_sem);
- if (page_mapcount(page)) {
- rc = -EAGAIN;
- goto unlock_both;
- }
+ /*
+ * Build a list of pages to migrate
+ */
+ migrate_prep();
+ for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
+ struct vm_area_struct *vma;
+ struct page *page;
/*
- * Default handling if a filesystem does not provide
- * a migration function. We can only migrate clean
- * pages so try to write out any dirty pages first.
+ * A valid page pointer that will not match any of the
+ * pages that will be moved.
*/
- if (PageDirty(page)) {
- switch (pageout(page, mapping)) {
- case PAGE_KEEP:
- case PAGE_ACTIVATE:
- goto unlock_both;
-
- case PAGE_SUCCESS:
- unlock_page(newpage);
- goto next;
-
- case PAGE_CLEAN:
- ; /* try to migrate the page below */
- }
- }
+ pp->page = ZERO_PAGE(0);
- /*
- * Buffers are managed in a filesystem specific way.
- * We must have no buffers or drop them.
- */
- if (!page_has_buffers(page) ||
- try_to_release_page(page, GFP_KERNEL)) {
- rc = migrate_page(newpage, page);
- goto unlock_both;
- }
+ err = -EFAULT;
+ vma = find_vma(mm, pp->addr);
+ if (!vma)
+ goto set_status;
- /*
- * On early passes with mapped pages simply
- * retry. There may be a lock held for some
- * buffers that may go away. Later
- * swap them out.
- */
- if (pass > 4) {
+ page = follow_page(vma, pp->addr, FOLL_GET);
+ err = -ENOENT;
+ if (!page)
+ goto set_status;
+
+ if (PageReserved(page)) /* Check for zero page */
+ goto put_and_set;
+
+ pp->page = page;
+ err = page_to_nid(page);
+
+ if (err == pp->node)
/*
- * Persistently unable to drop buffers..... As a
- * measure of last resort we fall back to
- * swap_page().
+ * Node already in the right place
*/
- unlock_page(newpage);
- newpage = NULL;
- rc = swap_page(page);
- goto next;
- }
+ goto put_and_set;
-unlock_both:
- unlock_page(newpage);
-
-unlock_page:
- unlock_page(page);
-
-next:
- if (rc == -EAGAIN) {
- retry++;
- } else if (rc) {
- /* Permanent failure */
- list_move(&page->lru, failed);
- nr_failed++;
- } else {
- if (newpage) {
- /* Successful migration. Return page to LRU */
- move_to_lru(newpage);
- }
- list_move(&page->lru, moved);
- }
+ err = -EACCES;
+ if (page_mapcount(page) > 1 &&
+ !migrate_all)
+ goto put_and_set;
+
+ err = isolate_lru_page(page, &pagelist);
+put_and_set:
+ /*
+ * Either remove the duplicate refcount from
+ * isolate_lru_page() or drop the page ref if it was
+ * not isolated.
+ */
+ put_page(page);
+set_status:
+ pp->status = err;
}
- if (retry && pass++ < 10)
- goto redo;
- if (!swapwrite)
- current->flags &= ~PF_SWAPWRITE;
+ if (!list_empty(&pagelist))
+ err = migrate_pages(&pagelist, new_page_node,
+ (unsigned long)pm);
+ else
+ err = -ENOENT;
- return nr_failed + retry;
+ up_read(&mm->mmap_sem);
+ return err;
}
/*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist.
+ * Determine the nodes of a list of pages. The addr in the pm array
+ * must have been set to the virtual address whose node number we want
+ * to determine.
*/
-int buffer_migrate_page(struct page *newpage, struct page *page)
+static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
- struct address_space *mapping = page->mapping;
- struct buffer_head *bh, *head;
- int rc;
+ down_read(&mm->mmap_sem);
+
+ for ( ; pm->node != MAX_NUMNODES; pm++) {
+ struct vm_area_struct *vma;
+ struct page *page;
+ int err;
+
+ err = -EFAULT;
+ vma = find_vma(mm, pm->addr);
+ if (!vma)
+ goto set_status;
+
+ page = follow_page(vma, pm->addr, 0);
+ err = -ENOENT;
+ /* Use PageReserved to check for zero page */
+ if (!page || PageReserved(page))
+ goto set_status;
+
+ err = page_to_nid(page);
+set_status:
+ pm->status = err;
+ }
- if (!mapping)
- return -EAGAIN;
+ up_read(&mm->mmap_sem);
+ return 0;
+}
- if (!page_has_buffers(page))
- return migrate_page(newpage, page);
+/*
+ * Move a list of pages in the address space of the currently executing
+ * process.
+ */
+asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+ const void __user * __user *pages,
+ const int __user *nodes,
+ int __user *status, int flags)
+{
+ int err = 0;
+ int i;
+ struct task_struct *task;
+ nodemask_t task_nodes;
+ struct mm_struct *mm;
+ struct page_to_node *pm = NULL;
- head = page_buffers(page);
+ /* Check flags */
+ if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
+ return -EINVAL;
- rc = migrate_page_remove_references(newpage, page, 3);
+ if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
+ return -EPERM;
- if (rc)
- return rc;
+ /* Find the mm_struct */
+ read_lock(&tasklist_lock);
+ task = pid ? find_task_by_pid(pid) : current;
+ if (!task) {
+ read_unlock(&tasklist_lock);
+ return -ESRCH;
+ }
+ mm = get_task_mm(task);
+ read_unlock(&tasklist_lock);
- bh = head;
- do {
- get_bh(bh);
- lock_buffer(bh);
- bh = bh->b_this_page;
+ if (!mm)
+ return -EINVAL;
- } while (bh != head);
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+ * capabilities, superuser privileges or the same
+ * userid as the target process.
+ */
+ if ((current->euid != task->suid) && (current->euid != task->uid) &&
+ (current->uid != task->suid) && (current->uid != task->uid) &&
+ !capable(CAP_SYS_NICE)) {
+ err = -EPERM;
+ goto out2;
+ }
- ClearPagePrivate(page);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- get_page(newpage);
+ err = security_task_movememory(task);
+ if (err)
+ goto out2;
- bh = head;
- do {
- set_bh_page(bh, newpage, bh_offset(bh));
- bh = bh->b_this_page;
- } while (bh != head);
+ task_nodes = cpuset_mems_allowed(task);
- SetPagePrivate(newpage);
+ /* Limit nr_pages so that the multiplication may not overflow */
+ if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
+ err = -E2BIG;
+ goto out2;
+ }
- migrate_page_copy(newpage, page);
+ pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
+ if (!pm) {
+ err = -ENOMEM;
+ goto out2;
+ }
- bh = head;
- do {
- unlock_buffer(bh);
- put_bh(bh);
- bh = bh->b_this_page;
+ /*
+ * Get parameters from user space and initialize the pm
+ * array. Return various errors if the user did something wrong.
+ */
+ for (i = 0; i < nr_pages; i++) {
+ const void *p;
- } while (bh != head);
+ err = -EFAULT;
+ if (get_user(p, pages + i))
+ goto out;
- return 0;
-}
-EXPORT_SYMBOL(buffer_migrate_page);
+ pm[i].addr = (unsigned long)p;
+ if (nodes) {
+ int node;
-/*
- * Migrate the list 'pagelist' of pages to a certain destination.
- *
- * Specify destination with either non-NULL vma or dest_node >= 0
- * Return the number of pages not migrated or error code
- */
-int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest)
-{
- LIST_HEAD(newlist);
- LIST_HEAD(moved);
- LIST_HEAD(failed);
- int err = 0;
- unsigned long offset = 0;
- int nr_pages;
- struct page *page;
- struct list_head *p;
+ if (get_user(node, nodes + i))
+ goto out;
-redo:
- nr_pages = 0;
- list_for_each(p, pagelist) {
- if (vma) {
- /*
- * The address passed to alloc_page_vma is used to
- * generate the proper interleave behavior. We fake
- * the address here by an increasing offset in order
- * to get the proper distribution of pages.
- *
- * No decision has been made as to which page
- * a certain old page is moved to so we cannot
- * specify the correct address.
- */
- page = alloc_page_vma(GFP_HIGHUSER, vma,
- offset + vma->vm_start);
- offset += PAGE_SIZE;
- }
- else
- page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
+ err = -ENODEV;
+ if (!node_online(node))
+ goto out;
- if (!page) {
- err = -ENOMEM;
- goto out;
+ err = -EACCES;
+ if (!node_isset(node, task_nodes))
+ goto out;
+
+ pm[i].node = node;
}
- list_add_tail(&page->lru, &newlist);
- nr_pages++;
- if (nr_pages > MIGRATE_CHUNK_SIZE)
- break;
}
- err = migrate_pages(pagelist, &newlist, &moved, &failed);
+ /* End marker */
+ pm[nr_pages].node = MAX_NUMNODES;
+
+ if (nodes)
+ err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
+ else
+ err = do_pages_stat(mm, pm);
- putback_lru_pages(&moved); /* Call release pages instead ?? */
+ if (err >= 0)
+ /* Return status information */
+ for (i = 0; i < nr_pages; i++)
+ if (put_user(pm[i].status, status + i))
+ err = -EFAULT;
- if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
- goto redo;
out:
- /* Return leftover allocated pages */
- while (!list_empty(&newlist)) {
- page = list_entry(newlist.next, struct page, lru);
- list_del(&page->lru);
- __free_page(page);
- }
- list_splice(&failed, pagelist);
- if (err < 0)
- return err;
-
- /* Calculate number of leftover pages */
- nr_pages = 0;
- list_for_each(p, pagelist)
- nr_pages++;
- return nr_pages;
+ vfree(pm);
+out2:
+ mmput(mm);
+ return err;
}
+#endif
+
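sys_move_pages() above is normally reached from user space through the move_pages() wrapper that libnuma ships in <numaif.h>. The sketch below assumes that wrapper and a kernel carrying this patch; it asks for one of the caller's own pages (pid 0 means the calling process) to be placed on node 0 and prints where it ended up:

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *pages[1];
	int nodes[1] = { 0 };		/* desired target node */
	int status[1];

	pages[0] = malloc(getpagesize());
	*(char *)pages[0] = 1;		/* fault the page in first */

	/* status[] receives the node each page landed on, or a
	 * negative errno for pages that could not be moved. */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
		perror("move_pages");
	else
		printf("page is now on node %d\n", status[0]);
	return 0;
}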
diff --git a/mm/mmap.c b/mm/mmap.c
index e6ee12344b13..6446c6134b04 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1065,7 +1065,8 @@ munmap_back:
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
- vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+ vma->vm_page_prot = protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
vma->vm_pgoff = pgoff;
if (file) {
@@ -1089,6 +1090,12 @@ munmap_back:
goto free_vma;
}
+	/* Don't make the VMA automatically writable if it's shared but the
+	 * backer wishes to know when pages are first written to */
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+ vma->vm_page_prot =
+ protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
* shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
* that memory reservation must be checked; but that reservation
@@ -1921,7 +1928,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
- vma->vm_page_prot = protection_map[flags & 0x0f];
+ vma->vm_page_prot = protection_map[flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
mm->total_vm += len >> PAGE_SHIFT;
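Both hunks above index protection_map[] with the named VM_* bits instead of the old magic 0x0f, and the page_mkwrite case deliberately drops VM_SHARED from the index so a shared mapping starts out write-protected and its first store faults into do_wp_page(), where ->page_mkwrite() is called. A small sketch that simply mirrors that logic (nothing beyond what the patch itself does; the function name is made up):

#include <linux/mm.h>

/* Pick the initial protections for a fresh vma.  Clearing VM_SHARED from
 * the index selects the private, write-protected entry, which is what
 * forces the first write through ->page_mkwrite(). */
static pgprot_t example_initial_prot(struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC | VM_SHARED;

	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		mask &= ~VM_SHARED;

	return protection_map[vma->vm_flags & mask];
}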
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4c14d4289b61..638edabaff71 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -19,7 +19,8 @@
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
-
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
@@ -28,12 +29,13 @@
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot)
{
- pte_t *pte;
+ pte_t *pte, oldpte;
spinlock_t *ptl;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
do {
- if (pte_present(*pte)) {
+ oldpte = *pte;
+ if (pte_present(oldpte)) {
pte_t ptent;
/* Avoid an SMP race with hardware updated dirty/clean
@@ -43,7 +45,22 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
set_pte_at(mm, addr, pte, ptent);
lazy_mmu_prot_update(ptent);
+#ifdef CONFIG_MIGRATION
+ } else if (!pte_file(oldpte)) {
+ swp_entry_t entry = pte_to_swp_entry(oldpte);
+
+ if (is_write_migration_entry(entry)) {
+ /*
+ * A protection check is difficult so
+ * just be safe and disable write
+ */
+ make_migration_entry_read(&entry);
+ set_pte_at(mm, addr, pte,
+ swp_entry_to_pte(entry));
+ }
+#endif
}
+
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(pte - 1, ptl);
}
@@ -106,6 +123,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long oldflags = vma->vm_flags;
long nrpages = (end - start) >> PAGE_SHIFT;
unsigned long charged = 0;
+ unsigned int mask;
pgprot_t newprot;
pgoff_t pgoff;
int error;
@@ -132,8 +150,6 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
}
}
- newprot = protection_map[newflags & 0xf];
-
/*
* First try to merge with previous and/or next vma.
*/
@@ -160,6 +176,14 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
}
success:
+	/* Don't make the VMA automatically writable if it's shared but the
+	 * backer wishes to know when pages are first written to */
+ mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+ mask &= ~VM_SHARED;
+
+ newprot = protection_map[newflags & mask];
+
/*
* vm_flags and vm_page_prot are protected by the mmap_sem
* held in write mode.
@@ -205,8 +229,7 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
/*
* Does the application expect PROT_READ to imply PROT_EXEC:
*/
- if (unlikely((prot & PROT_READ) &&
- (current->personality & READ_IMPLIES_EXEC)))
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
diff --git a/mm/msync.c b/mm/msync.c
index bc6c95376366..d083544df21b 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -170,8 +170,6 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
* just ignore them, but return -ENOMEM at the end.
*/
down_read(&current->mm->mmap_sem);
- if (flags & MS_SYNC)
- current->flags |= PF_SYNCWRITE;
vma = find_vma(current->mm, start);
if (!vma) {
error = -ENOMEM;
@@ -228,7 +226,6 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
}
} while (vma && !done);
out_unlock:
- current->flags &= ~PF_SYNCWRITE;
up_read(&current->mm->mmap_sem);
out:
return error;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 042e6436c3ee..d46ed0f1dc06 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -22,10 +22,11 @@
#include <linux/jiffies.h>
#include <linux/cpuset.h>
+int sysctl_panic_on_oom;
/* #define DEBUG */
/**
- * oom_badness - calculate a numeric value for how bad this task has been
+ * badness - calculate a numeric value for how bad this task has been
* @p: task struct of which task we should calculate
* @uptime: current uptime in seconds
*
@@ -200,7 +201,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
continue;
/*
- * This is in the process of releasing memory so for wait it
+ * This is in the process of releasing memory so wait for it
* to finish before killing some other task by mistake.
*/
releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
@@ -306,7 +307,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points,
}
/**
- * oom_kill - kill the "best" process when we run out of memory
+ * out_of_memory - kill the "best" process when we run out of memory
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
@@ -344,6 +345,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
break;
case CONSTRAINT_NONE:
+ if (sysctl_panic_on_oom)
+ panic("out of memory. panic_on_oom is selected\n");
retry:
/*
* Rambo mode: Shoot down a process and hope it solves whatever
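The new sysctl_panic_on_oom knob above makes CONSTRAINT_NONE OOM situations panic instead of killing a task. Assuming the companion sysctl table hookup (not shown in this hunk) exposes it as /proc/sys/vm/panic_on_oom, it could be switched on like this, equivalent to "sysctl vm.panic_on_oom=1":

#include <stdio.h>

int main(void)
{
	/* Path assumed from the usual vm.* sysctl naming; the sysctl
	 * registration itself is not part of this hunk. */
	FILE *f = fopen("/proc/sys/vm/panic_on_oom", "w");

	if (!f) {
		perror("panic_on_oom");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}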
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 75d7f48b79bb..8ccf6f1b1473 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,6 +204,7 @@ static void balance_dirty_pages(struct address_space *mapping)
.sync_mode = WB_SYNC_NONE,
.older_than_this = NULL,
.nr_to_write = write_chunk,
+ .range_cyclic = 1,
};
get_dirty_limits(&wbs, &background_thresh,
@@ -331,6 +332,7 @@ static void background_writeout(unsigned long _min_pages)
.older_than_this = NULL,
.nr_to_write = 0,
.nonblocking = 1,
+ .range_cyclic = 1,
};
for ( ; ; ) {
@@ -407,6 +409,7 @@ static void wb_kupdate(unsigned long arg)
.nr_to_write = 0,
.nonblocking = 1,
.for_kupdate = 1,
+ .range_cyclic = 1,
};
sync_supers();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 253a450c400d..423db0db7c02 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -37,6 +37,7 @@
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
+#include <linux/stop_machine.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -83,8 +84,8 @@ EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;
-unsigned long __initdata nr_kernel_pages;
-unsigned long __initdata nr_all_pages;
+unsigned long __meminitdata nr_kernel_pages;
+unsigned long __meminitdata nr_all_pages;
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
@@ -286,22 +287,27 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
* we can do coalesce a page and its buddy if
* (a) the buddy is not in a hole &&
* (b) the buddy is in the buddy system &&
- * (c) a page and its buddy have the same order.
+ * (c) a page and its buddy have the same order &&
+ * (d) a page and its buddy are in the same zone.
*
* For recording whether a page is in the buddy system, we use PG_buddy.
* Setting, clearing, and testing PG_buddy is serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
-static inline int page_is_buddy(struct page *page, int order)
+static inline int page_is_buddy(struct page *page, struct page *buddy,
+ int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
- if (!pfn_valid(page_to_pfn(page)))
+ if (!pfn_valid(page_to_pfn(buddy)))
return 0;
#endif
- if (PageBuddy(page) && page_order(page) == order) {
- BUG_ON(page_count(page) != 0);
+ if (page_zone_id(page) != page_zone_id(buddy))
+ return 0;
+
+ if (PageBuddy(buddy) && page_order(buddy) == order) {
+ BUG_ON(page_count(buddy) != 0);
return 1;
}
return 0;
@@ -352,7 +358,7 @@ static inline void __free_one_page(struct page *page,
struct page *buddy;
buddy = __page_find_buddy(page, page_idx, order);
- if (!page_is_buddy(buddy, order))
+ if (!page_is_buddy(page, buddy, order))
break; /* Move the buddy up one level. */
list_del(&buddy->lru);
@@ -1485,7 +1491,7 @@ void show_free_areas(void)
}
for_each_zone(zone) {
- unsigned long nr, flags, order, total = 0;
+ unsigned long nr[MAX_ORDER], flags, order, total = 0;
show_node(zone);
printk("%s: ", zone->name);
@@ -1496,11 +1502,12 @@ void show_free_areas(void)
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
- nr = zone->free_area[order].nr_free;
- total += nr << order;
- printk("%lu*%lukB ", nr, K(1UL) << order);
+ nr[order] = zone->free_area[order].nr_free;
+ total += nr[order] << order;
}
spin_unlock_irqrestore(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++)
+ printk("%lu*%lukB ", nr[order], K(1UL) << order);
printk("= %lukB\n", K(total));
}
@@ -1512,7 +1519,7 @@ void show_free_areas(void)
*
* Add all populated zones of a node to the zonelist.
*/
-static int __init build_zonelists_node(pg_data_t *pgdat,
+static int __meminit build_zonelists_node(pg_data_t *pgdat,
struct zonelist *zonelist, int nr_zones, int zone_type)
{
struct zone *zone;
@@ -1548,7 +1555,7 @@ static inline int highest_zone(int zone_bits)
#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
-static int __initdata node_load[MAX_NUMNODES];
+static int __meminitdata node_load[MAX_NUMNODES];
/**
* find_next_best_node - find the next node that should appear in a given node's fallback list
* @node: node whose fallback list we're appending
@@ -1563,7 +1570,7 @@ static int __initdata node_load[MAX_NUMNODES];
* on them otherwise.
* It returns -1 if no node is found.
*/
-static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
+static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
{
int n, val;
int min_val = INT_MAX;
@@ -1609,7 +1616,7 @@ static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
return best_node;
}
-static void __init build_zonelists(pg_data_t *pgdat)
+static void __meminit build_zonelists(pg_data_t *pgdat)
{
int i, j, k, node, local_node;
int prev_node, load;
@@ -1661,7 +1668,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
#else /* CONFIG_NUMA */
-static void __init build_zonelists(pg_data_t *pgdat)
+static void __meminit build_zonelists(pg_data_t *pgdat)
{
int i, j, k, node, local_node;
@@ -1699,14 +1706,29 @@ static void __init build_zonelists(pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
-void __init build_all_zonelists(void)
+/* return value is int just for stop_machine_run() */
+static int __meminit __build_all_zonelists(void *dummy)
{
- int i;
+ int nid;
+ for_each_online_node(nid)
+ build_zonelists(NODE_DATA(nid));
+ return 0;
+}
- for_each_online_node(i)
- build_zonelists(NODE_DATA(i));
- printk("Built %i zonelists\n", num_online_nodes());
- cpuset_init_current_mems_allowed();
+void __meminit build_all_zonelists(void)
+{
+ if (system_state == SYSTEM_BOOTING) {
+ __build_all_zonelists(0);
+ cpuset_init_current_mems_allowed();
+ } else {
+		/* we have to stop all cpus to guarantee there is no user
+ of zonelist */
+ stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
+ /* cpuset refresh routine should be here */
+ }
+ vm_total_pages = nr_free_pagecache_pages();
+ printk("Built %i zonelists. Total pages: %ld\n",
+ num_online_nodes(), vm_total_pages);
}
/*
@@ -1722,7 +1744,8 @@ void __init build_all_zonelists(void)
*/
#define PAGES_PER_WAITQUEUE 256
-static inline unsigned long wait_table_size(unsigned long pages)
+#ifndef CONFIG_MEMORY_HOTPLUG
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
unsigned long size = 1;
@@ -1740,6 +1763,29 @@ static inline unsigned long wait_table_size(unsigned long pages)
return max(size, 4UL);
}
+#else
+/*
+ * A zone's size might be changed by hot-add, so it is not possible to determine
+ * a suitable size for its wait_table. So we use the maximum size now.
+ *
+ * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
+ *
+ * i386 (preemption config) : 4096 x 16 = 64Kbyte.
+ * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
+ * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
+ *
+ * The traditional sizing (see above) would already reach this maximum once a
+ * zone has (512K + 256) pages or more.  That amount of memory equals:
+ *
+ * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
+ * ia64(16K page size) : = ( 8G + 4M)byte.
+ * powerpc (64K page size) : = (32G +16M)byte.
+ */
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
+{
+ return 4096UL;
+}
+#endif
/*
* This is an integer logarithm so that shifts can be used later
@@ -2005,23 +2051,46 @@ void __init setup_per_cpu_pageset(void)
#endif
static __meminit
-void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
+int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
struct pglist_data *pgdat = zone->zone_pgdat;
+ size_t alloc_size;
/*
* The per-page waitqueue mechanism uses hashed waitqueues
* per zone.
*/
- zone->wait_table_size = wait_table_size(zone_size_pages);
- zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
- zone->wait_table = (wait_queue_head_t *)
- alloc_bootmem_node(pgdat, zone->wait_table_size
- * sizeof(wait_queue_head_t));
+ zone->wait_table_hash_nr_entries =
+ wait_table_hash_nr_entries(zone_size_pages);
+ zone->wait_table_bits =
+ wait_table_bits(zone->wait_table_hash_nr_entries);
+ alloc_size = zone->wait_table_hash_nr_entries
+ * sizeof(wait_queue_head_t);
+
+ if (system_state == SYSTEM_BOOTING) {
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, alloc_size);
+ } else {
+ /*
+ * This case means that a zone whose size was 0 gets new memory
+		 * This case means that a zone whose size was 0 gets new memory
+		 * via memory hot-add.
+		 * But it may also be that a whole new node was hot-added.  In
+		 * that case vmalloc() cannot yet allocate from the new node,
+		 * even though this wait_table would ideally live on the node
+		 * it serves.
+		 * Using the new node's own memory for it will need further
+		 * work.
+ zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
+ }
+ if (!zone->wait_table)
+ return -ENOMEM;
- for(i = 0; i < zone->wait_table_size; ++i)
+ for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
init_waitqueue_head(zone->wait_table + i);
+
+ return 0;
}
static __meminit void zone_pcp_init(struct zone *zone)
@@ -2043,12 +2112,15 @@ static __meminit void zone_pcp_init(struct zone *zone)
zone->name, zone->present_pages, batch);
}
-static __meminit void init_currently_empty_zone(struct zone *zone,
- unsigned long zone_start_pfn, unsigned long size)
+__meminit int init_currently_empty_zone(struct zone *zone,
+ unsigned long zone_start_pfn,
+ unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;
-
- zone_wait_table_init(zone, size);
+ int ret;
+ ret = zone_wait_table_init(zone, size);
+ if (ret)
+ return ret;
pgdat->nr_zones = zone_idx(zone) + 1;
zone->zone_start_pfn = zone_start_pfn;
@@ -2056,6 +2128,8 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+
+ return 0;
}
/*
@@ -2064,12 +2138,13 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
* - mark all memory queues empty
* - clear the memory bitmaps
*/
-static void __init free_area_init_core(struct pglist_data *pgdat,
+static void __meminit free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long j;
int nid = pgdat->node_id;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
+ int ret;
pgdat_resize_init(pgdat);
pgdat->nr_zones = 0;
@@ -2111,7 +2186,8 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
continue;
zonetable_add(zone, nid, j, zone_start_pfn, size);
- init_currently_empty_zone(zone, zone_start_pfn, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+ BUG_ON(ret);
zone_start_pfn += size;
}
}
@@ -2152,7 +2228,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
-void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long node_start_pfn,
unsigned long *zholes_size)
{
@@ -2804,42 +2880,14 @@ void *__init alloc_large_system_hash(const char *tablename,
}
#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-/*
- * pfn <-> page translation. out-of-line version.
- * (see asm-generic/memory_model.h)
- */
-#if defined(CONFIG_FLATMEM)
struct page *pfn_to_page(unsigned long pfn)
{
- return mem_map + (pfn - ARCH_PFN_OFFSET);
+ return __pfn_to_page(pfn);
}
unsigned long page_to_pfn(struct page *page)
{
- return (page - mem_map) + ARCH_PFN_OFFSET;
-}
-#elif defined(CONFIG_DISCONTIGMEM)
-struct page *pfn_to_page(unsigned long pfn)
-{
- int nid = arch_pfn_to_nid(pfn);
- return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid);
-}
-unsigned long page_to_pfn(struct page *page)
-{
- struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
- return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
-}
-#elif defined(CONFIG_SPARSEMEM)
-struct page *pfn_to_page(unsigned long pfn)
-{
- return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
-}
-
-unsigned long page_to_pfn(struct page *page)
-{
- long section_id = page_to_section(page);
- return page - __section_mem_map_addr(__nr_to_section(section_id));
+ return __page_to_pfn(page);
}
-#endif /* CONFIG_FLATMEM/DISCONTIGMME/SPARSEMEM */
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
diff --git a/mm/pdflush.c b/mm/pdflush.c
index c4b6d0afd736..df7e50b8f70c 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -202,8 +202,7 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
unsigned long flags;
int ret = 0;
- if (fn == NULL)
- BUG(); /* Hard to diagnose if it's deferred */
+ BUG_ON(fn == NULL); /* Hard to diagnose if it's deferred */
spin_lock_irqsave(&pdflush_lock, flags);
if (list_empty(&pdflush_list)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 1963e269314d..882a85826bb2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -103,7 +103,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
allocated = NULL;
}
spin_unlock(&mm->page_table_lock);
@@ -127,7 +127,7 @@ void __anon_vma_link(struct vm_area_struct *vma)
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma) {
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
}
}
@@ -138,7 +138,7 @@ void anon_vma_link(struct vm_area_struct *vma)
if (anon_vma) {
spin_lock(&anon_vma->lock);
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
spin_unlock(&anon_vma->lock);
}
@@ -205,44 +205,6 @@ out:
return anon_vma;
}
-#ifdef CONFIG_MIGRATION
-/*
- * Remove an anonymous page from swap replacing the swap pte's
- * through real pte's pointing to valid pages and then releasing
- * the page from the swap cache.
- *
- * Must hold page lock on page and mmap_sem of one vma that contains
- * the page.
- */
-void remove_from_swap(struct page *page)
-{
- struct anon_vma *anon_vma;
- struct vm_area_struct *vma;
- unsigned long mapping;
-
- if (!PageSwapCache(page))
- return;
-
- mapping = (unsigned long)page->mapping;
-
- if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
- return;
-
- /*
- * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
- */
- anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
- spin_lock(&anon_vma->lock);
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
- remove_vma_swap(vma, page);
-
- spin_unlock(&anon_vma->lock);
- delete_from_swap_cache(page);
-}
-EXPORT_SYMBOL(remove_from_swap);
-#endif
-
/*
* At what user virtual address is page expected in vma?
*/
@@ -578,7 +540,7 @@ void page_remove_rmap(struct page *page)
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- int ignore_refs)
+ int migration)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -602,7 +564,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
*/
if ((vma->vm_flags & VM_LOCKED) ||
(ptep_clear_flush_young(vma, address, pte)
- && !ignore_refs)) {
+ && !migration)) {
ret = SWAP_FAIL;
goto out_unmap;
}
@@ -620,24 +582,45 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
- /*
- * Store the swap location in the pte.
- * See handle_pte_fault() ...
- */
- BUG_ON(!PageSwapCache(page));
- swap_duplicate(entry);
- if (list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- if (list_empty(&mm->mmlist))
- list_add(&mm->mmlist, &init_mm.mmlist);
- spin_unlock(&mmlist_lock);
+
+ if (PageSwapCache(page)) {
+ /*
+ * Store the swap location in the pte.
+ * See handle_pte_fault() ...
+ */
+ swap_duplicate(entry);
+ if (list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&mm->mmlist))
+ list_add(&mm->mmlist, &init_mm.mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ dec_mm_counter(mm, anon_rss);
+#ifdef CONFIG_MIGRATION
+ } else {
+ /*
+ * Store the pfn of the page in a special migration
+ * pte. do_swap_page() will wait until the migration
+ * pte is removed and then restart fault handling.
+ */
+ BUG_ON(!migration);
+ entry = make_migration_entry(page, pte_write(pteval));
+#endif
}
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
- dec_mm_counter(mm, anon_rss);
} else
+#ifdef CONFIG_MIGRATION
+ if (migration) {
+ /* Establish migration entry for a file page */
+ swp_entry_t entry;
+ entry = make_migration_entry(page, pte_write(pteval));
+ set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+ } else
+#endif
dec_mm_counter(mm, file_rss);
+
page_remove_rmap(page);
page_cache_release(page);
@@ -736,7 +719,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
pte_unmap_unlock(pte - 1, ptl);
}
-static int try_to_unmap_anon(struct page *page, int ignore_refs)
+static int try_to_unmap_anon(struct page *page, int migration)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
@@ -747,7 +730,7 @@ static int try_to_unmap_anon(struct page *page, int ignore_refs)
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- ret = try_to_unmap_one(page, vma, ignore_refs);
+ ret = try_to_unmap_one(page, vma, migration);
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
@@ -764,7 +747,7 @@ static int try_to_unmap_anon(struct page *page, int ignore_refs)
*
* This function is only called from try_to_unmap for object-based pages.
*/
-static int try_to_unmap_file(struct page *page, int ignore_refs)
+static int try_to_unmap_file(struct page *page, int migration)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -778,7 +761,7 @@ static int try_to_unmap_file(struct page *page, int ignore_refs)
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- ret = try_to_unmap_one(page, vma, ignore_refs);
+ ret = try_to_unmap_one(page, vma, migration);
if (ret == SWAP_FAIL || !page_mapped(page))
goto out;
}
@@ -863,16 +846,16 @@ out:
* SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
*/
-int try_to_unmap(struct page *page, int ignore_refs)
+int try_to_unmap(struct page *page, int migration)
{
int ret;
BUG_ON(!PageLocked(page));
if (PageAnon(page))
- ret = try_to_unmap_anon(page, ignore_refs);
+ ret = try_to_unmap_anon(page, migration);
else
- ret = try_to_unmap_file(page, ignore_refs);
+ ret = try_to_unmap_file(page, migration);
if (!page_mapped(page))
ret = SWAP_SUCCESS;
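With the hunks above, try_to_unmap_one() can replace a mapped pte with a migration entry, a swap-style entry that records the page's pfn and whether the mapping was writable, so do_swap_page() can wait until migration finishes. A minimal user-space sketch of that encode/decode idea follows; the bit layout and the toy_* names are invented for illustration and do not match the real swp_entry_t layout.

#include <assert.h>
#include <stdbool.h>

/*
 * Toy stand-in for a migration entry: pack a pfn and a "writable" bit
 * into one word, the way a swap-style pte packs type and offset.
 */
typedef unsigned long toy_entry_t;

#define TOY_WRITE_BIT  0x1UL

static toy_entry_t toy_make_migration_entry(unsigned long pfn, bool writable)
{
        return (pfn << 1) | (writable ? TOY_WRITE_BIT : 0);
}

static unsigned long toy_migration_entry_to_pfn(toy_entry_t e)
{
        return e >> 1;
}

static bool toy_is_write_migration_entry(toy_entry_t e)
{
        return e & TOY_WRITE_BIT;
}

int main(void)
{
        toy_entry_t e = toy_make_migration_entry(0x1234, true);

        assert(toy_migration_entry_to_pfn(e) == 0x1234);
        assert(toy_is_write_migration_entry(e));
        return 0;
}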
diff --git a/mm/shmem.c b/mm/shmem.c
index 1e43c8a865ba..84b5cf9b63c5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1081,14 +1081,6 @@ repeat:
page_cache_release(swappage);
goto repeat;
}
- if (!PageSwapCache(swappage)) {
- /* Page migration has occured */
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- unlock_page(swappage);
- page_cache_release(swappage);
- goto repeat;
- }
if (PageWriteback(swappage)) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
@@ -1654,9 +1646,9 @@ static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
return desc.error;
}
-static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
+static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
@@ -2233,10 +2225,10 @@ static struct vm_operations_struct shmem_vm_ops = {
};
-static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int shmem_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
+ return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}
static struct file_system_type tmpfs_fs_type = {
diff --git a/mm/slab.c b/mm/slab.c
index f1b644eb39d8..98ac20bc0de9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -331,6 +331,8 @@ static __always_inline int index_of(const size_t size)
return 0;
}
+static int slab_early_init = 1;
+
#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
@@ -592,6 +594,7 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
{
if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
+ BUG_ON(!PageSlab(page));
return (struct kmem_cache *)page->lru.next;
}
@@ -604,6 +607,7 @@ static inline struct slab *page_get_slab(struct page *page)
{
if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
+ BUG_ON(!PageSlab(page));
return (struct slab *)page->lru.prev;
}
@@ -1024,6 +1028,40 @@ static void drain_alien_cache(struct kmem_cache *cachep,
}
}
}
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+ struct slab *slabp = virt_to_slab(objp);
+ int nodeid = slabp->nodeid;
+ struct kmem_list3 *l3;
+ struct array_cache *alien = NULL;
+
+ /*
+ * Make sure we are not freeing an object from another node to the array
+ * cache on this cpu.
+ */
+ if (likely(slabp->nodeid == numa_node_id()))
+ return 0;
+
+ l3 = cachep->nodelists[numa_node_id()];
+ STATS_INC_NODEFREES(cachep);
+ if (l3->alien && l3->alien[nodeid]) {
+ alien = l3->alien[nodeid];
+ spin_lock(&alien->lock);
+ if (unlikely(alien->avail == alien->limit)) {
+ STATS_INC_ACOVERFLOW(cachep);
+ __drain_alien_cache(cachep, alien, nodeid);
+ }
+ alien->entry[alien->avail++] = objp;
+ spin_unlock(&alien->lock);
+ } else {
+ spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+ free_block(cachep, &objp, 1, nodeid);
+ spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+ }
+ return 1;
+}
+
#else
#define drain_alien_cache(cachep, alien) do { } while (0)
@@ -1038,6 +1076,11 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+ return 0;
+}
+
#endif
static int cpuup_callback(struct notifier_block *nfb,
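The new cache_free_alien() above batches frees of objects that belong to another NUMA node into a small per-node "alien" array and only takes the remote node's list lock when that array fills up. A rough user-space sketch of the batching idea, with invented sizes and names:

#include <stdio.h>

#define TOY_ALIEN_LIMIT 4

/* A tiny per-remote-node buffer of objects waiting to be returned. */
struct toy_alien_cache {
        void *entry[TOY_ALIEN_LIMIT];
        int avail;
};

/* Stand-in for handing a batch back to the remote node under its lock. */
static void toy_drain(struct toy_alien_cache *alien, int node)
{
        printf("draining %d objects back to node %d\n", alien->avail, node);
        alien->avail = 0;
}

/* Queue a remote free; drain first if the buffer is already full. */
static void toy_free_remote(struct toy_alien_cache *alien, int node, void *obj)
{
        if (alien->avail == TOY_ALIEN_LIMIT)
                toy_drain(alien, node);
        alien->entry[alien->avail++] = obj;
}

int main(void)
{
        struct toy_alien_cache alien = { .avail = 0 };
        int objs[6];

        for (int i = 0; i < 6; i++)
                toy_free_remote(&alien, 1, &objs[i]);
        toy_drain(&alien, 1);
        return 0;
}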
@@ -1335,6 +1378,8 @@ void __init kmem_cache_init(void)
NULL, NULL);
}
+ slab_early_init = 0;
+
while (sizes->cs_size != ULONG_MAX) {
/*
* For performance, all the general caches are L1 aligned.
@@ -1450,31 +1495,29 @@ __initcall(cpucache_init);
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
- void *addr;
+ int nr_pages;
int i;
- flags |= cachep->gfpflags;
#ifndef CONFIG_MMU
- /* nommu uses slab's for process anonymous memory allocations, so
- * requires __GFP_COMP to properly refcount higher order allocations"
+ /*
+ * Nommu uses slabs for process anonymous memory allocations, and thus
+ * requires __GFP_COMP to properly refcount higher order allocations
*/
- page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
-#else
- page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+ flags |= __GFP_COMP;
#endif
+ flags |= cachep->gfpflags;
+
+ page = alloc_pages_node(nodeid, flags, cachep->gfporder);
if (!page)
return NULL;
- addr = page_address(page);
- i = (1 << cachep->gfporder);
+ nr_pages = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- atomic_add(i, &slab_reclaim_pages);
- add_page_state(nr_slab, i);
- while (i--) {
- __SetPageSlab(page);
- page++;
- }
- return addr;
+ atomic_add(nr_pages, &slab_reclaim_pages);
+ add_page_state(nr_slab, nr_pages);
+ for (i = 0; i < nr_pages; i++)
+ __SetPageSlab(page + i);
+ return page_address(page);
}
/*
@@ -1913,8 +1956,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
- struct kmem_cache *cachep = NULL;
- struct list_head *p;
+ struct kmem_cache *cachep = NULL, *pc;
/*
* Sanity checks... these are all serious usage bugs.
@@ -1934,8 +1976,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
mutex_lock(&cache_chain_mutex);
- list_for_each(p, &cache_chain) {
- struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
+ list_for_each_entry(pc, &cache_chain, next) {
mm_segment_t old_fs = get_fs();
char tmp;
int res;
@@ -2069,8 +2110,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
#endif
#endif
- /* Determine if the slab management is 'on' or 'off' slab. */
- if (size >= (PAGE_SIZE >> 3))
+ /*
+ * Determine if the slab management is 'on' or 'off' slab.
+ * (bootstrapping cannot cope with offslab caches so don't do
+ * it too early on.)
+ */
+ if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
@@ -2460,23 +2505,28 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
slabp->inuse--;
}
-static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
- void *objp)
+/*
+ * Map pages beginning at addr to the given cache and slab. This is required
+ * for the slab allocator to be able to lookup the cache and slab of a
+ * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ */
+static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
+ void *addr)
{
- int i;
+ int nr_pages;
struct page *page;
- /* Nasty!!!!!! I hope this is OK. */
- page = virt_to_page(objp);
+ page = virt_to_page(addr);
- i = 1;
+ nr_pages = 1;
if (likely(!PageCompound(page)))
- i <<= cachep->gfporder;
+ nr_pages <<= cache->gfporder;
+
do {
- page_set_cache(page, cachep);
- page_set_slab(page, slabp);
+ page_set_cache(page, cache);
+ page_set_slab(page, slab);
page++;
- } while (--i);
+ } while (--nr_pages);
}
/*
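slab_map_pages() (the renamed set_slab_attr() above) records, for every page frame a slab spans, which cache and slab descriptor own it, so a later kfree()/ksize() can recover them from a bare pointer. A toy user-space version of that per-page back-pointer table, with an invented page size and lookup array:

#include <assert.h>

#define TOY_PAGE_SHIFT 12
#define TOY_NR_PAGES   8

struct toy_cache { const char *name; };
struct toy_slab  { int inuse; };

/* One descriptor pair per page frame, like the page->lru.next/prev trick. */
static struct {
        struct toy_cache *cache;
        struct toy_slab *slab;
} toy_pages[TOY_NR_PAGES];

/* Record, for every page an allocation spans, which cache/slab owns it. */
static void toy_map_pages(struct toy_cache *c, struct toy_slab *s,
                          unsigned long first_pfn, int nr_pages)
{
        for (int i = 0; i < nr_pages; i++) {
                toy_pages[first_pfn + i].cache = c;
                toy_pages[first_pfn + i].slab = s;
        }
}

/* Recover the owning cache from an address, as a free path would. */
static struct toy_cache *toy_addr_to_cache(unsigned long addr)
{
        return toy_pages[addr >> TOY_PAGE_SHIFT].cache;
}

int main(void)
{
        struct toy_cache c = { "toy" };
        struct toy_slab s = { 0 };

        toy_map_pages(&c, &s, 2, 2);            /* slab spans pfns 2 and 3 */
        assert(toy_addr_to_cache(3UL << TOY_PAGE_SHIFT) == &c);
        return 0;
}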
@@ -2548,7 +2598,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
goto opps1;
slabp->nodeid = nodeid;
- set_slab_attr(cachep, slabp, objp);
+ slab_map_pages(cachep, slabp, objp);
cache_init_objs(cachep, slabp, ctor_flags);
@@ -2596,6 +2646,28 @@ static void kfree_debugcheck(const void *objp)
}
}
+static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
+{
+ unsigned long redzone1, redzone2;
+
+ redzone1 = *dbg_redzone1(cache, obj);
+ redzone2 = *dbg_redzone2(cache, obj);
+
+ /*
+ * Redzone is ok.
+ */
+ if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
+ return;
+
+ if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
+ slab_error(cache, "double free detected");
+ else
+ slab_error(cache, "memory outside object was overwritten");
+
+ printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+ obj, redzone1, redzone2);
+}
+
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
void *caller)
{
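The verify_redzone_free() helper added above classifies a bad redzone pair: both guard words already inactive means a double free, anything else means memory around the object was overwritten. A self-contained sketch of the same decision, using invented magic values rather than the kernel's RED_ACTIVE/RED_INACTIVE constants:

#include <stdio.h>

#define TOY_RED_ACTIVE   0xd84156c5UL
#define TOY_RED_INACTIVE 0x09f911e9UL

/* Classify the state of the two guard words around an object. */
static const char *toy_check_redzones(unsigned long rz1, unsigned long rz2)
{
        if (rz1 == TOY_RED_ACTIVE && rz2 == TOY_RED_ACTIVE)
                return "ok";
        if (rz1 == TOY_RED_INACTIVE && rz2 == TOY_RED_INACTIVE)
                return "double free detected";
        return "memory outside object was overwritten";
}

int main(void)
{
        printf("%s\n", toy_check_redzones(TOY_RED_ACTIVE, TOY_RED_ACTIVE));
        printf("%s\n", toy_check_redzones(TOY_RED_INACTIVE, TOY_RED_INACTIVE));
        printf("%s\n", toy_check_redzones(TOY_RED_ACTIVE, 0xdeadUL));
        return 0;
}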
@@ -2607,27 +2679,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp);
page = virt_to_page(objp);
- if (page_get_cache(page) != cachep) {
- printk(KERN_ERR "mismatch in kmem_cache_free: expected "
- "cache %p, got %p\n",
- page_get_cache(page), cachep);
- printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
- printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
- page_get_cache(page)->name);
- WARN_ON(1);
- }
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
- *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
- slab_error(cachep, "double free, or memory outside"
- " object was overwritten");
- printk(KERN_ERR "%p: redzone 1:0x%lx, "
- "redzone 2:0x%lx.\n",
- objp, *dbg_redzone1(cachep, objp),
- *dbg_redzone2(cachep, objp));
- }
+ verify_redzone_free(cachep, objp);
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
@@ -3087,41 +3142,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
- /* Make sure we are not freeing a object from another
- * node to the array cache on this cpu.
- */
-#ifdef CONFIG_NUMA
- {
- struct slab *slabp;
- slabp = virt_to_slab(objp);
- if (unlikely(slabp->nodeid != numa_node_id())) {
- struct array_cache *alien = NULL;
- int nodeid = slabp->nodeid;
- struct kmem_list3 *l3;
-
- l3 = cachep->nodelists[numa_node_id()];
- STATS_INC_NODEFREES(cachep);
- if (l3->alien && l3->alien[nodeid]) {
- alien = l3->alien[nodeid];
- spin_lock(&alien->lock);
- if (unlikely(alien->avail == alien->limit)) {
- STATS_INC_ACOVERFLOW(cachep);
- __drain_alien_cache(cachep,
- alien, nodeid);
- }
- alien->entry[alien->avail++] = objp;
- spin_unlock(&alien->lock);
- } else {
- spin_lock(&(cachep->nodelists[nodeid])->
- list_lock);
- free_block(cachep, &objp, 1, nodeid);
- spin_unlock(&(cachep->nodelists[nodeid])->
- list_lock);
- }
- return;
- }
- }
-#endif
+ if (cache_free_alien(cachep, objp))
+ return;
+
if (likely(ac->avail < ac->limit)) {
STATS_INC_FREEHIT(cachep);
ac->entry[ac->avail++] = objp;
@@ -3254,26 +3277,10 @@ EXPORT_SYMBOL(kmalloc_node);
#endif
/**
- * kmalloc - allocate memory
+ * __do_kmalloc - allocate memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: the type of memory to allocate (see kmalloc).
* @caller: function caller for debug tracking of the caller
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- *
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
- *
- * Additionally, the %GFP_DMA flag may be set to indicate the memory
- * must be suitable for DMA. This can mean different things on different
- * platforms. For example, on i386, it means that the memory must come
- * from the first 16MB.
*/
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
void *caller)
@@ -3371,6 +3378,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
+ BUG_ON(virt_to_cache(objp) != cachep);
+
local_irq_save(flags);
__cache_free(cachep, objp);
local_irq_restore(flags);
@@ -3680,7 +3689,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
*/
static void cache_reap(void *unused)
{
- struct list_head *walk;
+ struct kmem_cache *searchp;
struct kmem_list3 *l3;
int node = numa_node_id();
@@ -3691,13 +3700,11 @@ static void cache_reap(void *unused)
return;
}
- list_for_each(walk, &cache_chain) {
- struct kmem_cache *searchp;
+ list_for_each_entry(searchp, &cache_chain, next) {
struct list_head *p;
int tofree;
struct slab *slabp;
- searchp = list_entry(walk, struct kmem_cache, next);
check_irq_on();
/*
@@ -3825,7 +3832,6 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = p;
- struct list_head *q;
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
@@ -3846,15 +3852,13 @@ static int s_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&l3->list_lock);
- list_for_each(q, &l3->slabs_full) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_full, list) {
if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each(q, &l3->slabs_partial) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_partial, list) {
if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error";
if (!slabp->inuse && !error)
@@ -3862,8 +3866,7 @@ static int s_show(struct seq_file *m, void *p)
active_objs += slabp->inuse;
active_slabs++;
}
- list_for_each(q, &l3->slabs_free) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_free, list) {
if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error";
num_slabs++;
@@ -3956,7 +3959,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
{
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
- struct list_head *p;
+ struct kmem_cache *cachep;
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
@@ -3975,10 +3978,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
/* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
- list_for_each(p, &cache_chain) {
- struct kmem_cache *cachep;
-
- cachep = list_entry(p, struct kmem_cache, next);
+ list_for_each_entry(cachep, &cache_chain, next) {
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
@@ -4080,7 +4080,6 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = p;
- struct list_head *q;
struct slab *slabp;
struct kmem_list3 *l3;
const char *name;
@@ -4105,14 +4104,10 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&l3->list_lock);
- list_for_each(q, &l3->slabs_full) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_full, list)
handle_slab(n, cachep, slabp);
- }
- list_for_each(q, &l3->slabs_partial) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_partial, list)
handle_slab(n, cachep, slabp);
- }
spin_unlock_irq(&l3->list_lock);
}
name = cachep->name;
diff --git a/mm/sparse.c b/mm/sparse.c
index 100040c0dfb6..e0a3fe48aa37 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -99,6 +99,22 @@ int __section_nr(struct mem_section* ms)
return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
+/*
+ * During early boot, before section_mem_map is used for an actual
+ * mem_map, we use section_mem_map to store the section's NUMA
+ * node. This keeps us from having to use another data structure. The
+ * node information is cleared just before we store the real mem_map.
+ */
+static inline unsigned long sparse_encode_early_nid(int nid)
+{
+ return (nid << SECTION_NID_SHIFT);
+}
+
+static inline int sparse_early_nid(struct mem_section *section)
+{
+ return (section->section_mem_map >> SECTION_NID_SHIFT);
+}
+
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -113,7 +129,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
ms = __nr_to_section(section);
if (!ms->section_mem_map)
- ms->section_mem_map = SECTION_MARKED_PRESENT;
+ ms->section_mem_map = sparse_encode_early_nid(nid) |
+ SECTION_MARKED_PRESENT;
}
}
@@ -164,6 +181,7 @@ static int sparse_init_one_section(struct mem_section *ms,
if (!valid_section(ms))
return -EINVAL;
+ ms->section_mem_map &= ~SECTION_MAP_MASK;
ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
return 1;
@@ -172,8 +190,8 @@ static int sparse_init_one_section(struct mem_section *ms,
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
struct page *map;
- int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
struct mem_section *ms = __nr_to_section(pnum);
+ int nid = sparse_early_nid(ms);
map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
if (map)
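The sparse.c change stores each section's NUMA node in the upper bits of section_mem_map during early boot and clears those bits again just before the encoded mem_map pointer is written (the new "&= ~SECTION_MAP_MASK" line). A small sketch of that encode/decode trick; the shift and mask values here are invented, not the real SECTION_NID_SHIFT layout:

#include <assert.h>

/* Invented layout: bit 0 flags "present", the upper bits hold the early nid
 * until the real (encoded) mem_map value overwrites them. */
#define TOY_SECTION_PRESENT   0x1UL
#define TOY_SECTION_FLAG_MASK 0x3UL   /* bits reserved for flags */
#define TOY_SECTION_NID_SHIFT 2

static unsigned long toy_encode_early_nid(int nid)
{
        return (unsigned long)nid << TOY_SECTION_NID_SHIFT;
}

static int toy_early_nid(unsigned long section_mem_map)
{
        return (int)(section_mem_map >> TOY_SECTION_NID_SHIFT);
}

int main(void)
{
        unsigned long section = toy_encode_early_nid(3) | TOY_SECTION_PRESENT;

        /* Before the real mem_map is stored, the node id is recoverable. */
        assert(toy_early_nid(section) == 3);

        /* Storing the map first clears everything but the flag bits. */
        section &= TOY_SECTION_FLAG_MASK;
        section |= 0xb000UL;            /* pretend encoded mem_map value */
        assert(section & TOY_SECTION_PRESENT);
        return 0;
}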
diff --git a/mm/swap.c b/mm/swap.c
index 88895c249bc9..03ae2076f92f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -480,48 +480,6 @@ static int cpu_swap_callback(struct notifier_block *nfb,
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
- long count;
- long *pcount;
- int cpu = get_cpu();
-
- pcount = per_cpu_ptr(fbc->counters, cpu);
- count = *pcount + amount;
- if (count >= FBC_BATCH || count <= -FBC_BATCH) {
- spin_lock(&fbc->lock);
- fbc->count += count;
- *pcount = 0;
- spin_unlock(&fbc->lock);
- } else {
- *pcount = count;
- }
- put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
- long ret;
- int cpu;
-
- spin_lock(&fbc->lock);
- ret = fbc->count;
- for_each_possible_cpu(cpu) {
- long *pcount = per_cpu_ptr(fbc->counters, cpu);
- ret += *pcount;
- }
- spin_unlock(&fbc->lock);
- return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
-
/*
* Perform any setup for the swap system
*/
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e5fd5385f0cc..cc367f7e75d8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -395,6 +395,9 @@ void free_swap_and_cache(swp_entry_t entry)
struct swap_info_struct * p;
struct page *page = NULL;
+ if (is_migration_entry(entry))
+ return;
+
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, swp_offset(entry)) == 1) {
@@ -615,15 +618,6 @@ static int unuse_mm(struct mm_struct *mm,
return 0;
}
-#ifdef CONFIG_MIGRATION
-int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
-{
- swp_entry_t entry = { .val = page_private(page) };
-
- return unuse_vma(vma, entry, page);
-}
-#endif
-
/*
* Scan swap_map from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
@@ -716,7 +710,6 @@ static int try_to_unuse(unsigned int type)
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
-again:
page = read_swap_cache_async(entry, NULL, 0);
if (!page) {
/*
@@ -751,12 +744,6 @@ again:
wait_on_page_locked(page);
wait_on_page_writeback(page);
lock_page(page);
- if (!PageSwapCache(page)) {
- /* Page migration has occured */
- unlock_page(page);
- page_cache_release(page);
- goto again;
- }
wait_on_page_writeback(page);
/*
@@ -785,10 +772,8 @@ again:
while (*swap_map > 1 && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
- if (atomic_inc_return(&mm->mm_users) == 1) {
- atomic_dec(&mm->mm_users);
+ if (!atomic_inc_not_zero(&mm->mm_users))
continue;
- }
spin_unlock(&mmlist_lock);
mmput(prev_mm);
prev_mm = mm;
@@ -1407,19 +1392,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
if (!(p->flags & SWP_USED))
break;
error = -EPERM;
- /*
- * Test if adding another swap device is possible. There are
- * two limiting factors: 1) the number of bits for the swap
- * type swp_entry_t definition and 2) the number of bits for
- * the swap type in the swap ptes as defined by the different
- * architectures. To honor both limitations a swap entry
- * with swap offset 0 and swap type ~0UL is created, encoded
- * to a swap pte, decoded to a swp_entry_t again and finally
- * the swap type part is extracted. This will mask all bits
- * from the initial ~0UL that can't be encoded in either the
- * swp_entry_t or the architecture definition of a swap pte.
- */
- if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
+ if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
goto out;
}
@@ -1504,8 +1477,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
error = -EINVAL;
goto bad_swap;
}
- page = read_cache_page(mapping, 0,
- (filler_t *)mapping->a_ops->readpage, swap_file);
+ page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
error = PTR_ERR(page);
goto bad_swap;
@@ -1709,6 +1681,9 @@ int swap_duplicate(swp_entry_t entry)
unsigned long offset, type;
int result = 0;
+ if (is_migration_entry(entry))
+ return 1;
+
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
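try_to_unuse() now takes its reference on an mm with atomic_inc_not_zero(), which succeeds only while the count is still non-zero, instead of incrementing and then backing the increment out. A user-space sketch of that primitive using C11 atomics (the toy_ name is invented):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the counter is still non-zero. */
static bool toy_inc_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;    /* reference taken */
                /* old was reloaded by the failed CAS; retry */
        }
        return false;                   /* object already on its way out */
}

int main(void)
{
        atomic_int live = 2, dying = 0;

        /* Succeeds and bumps the count: 2 -> 3. */
        assert(toy_inc_not_zero(&live));
        /* Fails without touching a counter that already hit zero. */
        assert(!toy_inc_not_zero(&dying));
        return 0;
}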
diff --git a/mm/truncate.c b/mm/truncate.c
index 6cb3fff25f67..cf1b015df4a7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -230,14 +230,24 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
+ pgoff_t index;
+ int lock_failed;
- if (TestSetPageLocked(page)) {
- next++;
- continue;
- }
- if (page->index > next)
- next = page->index;
+ lock_failed = TestSetPageLocked(page);
+
+ /*
+ * We really shouldn't be looking at the ->index of an
+ * unlocked page. But we're not allowed to lock these
+ * pages. So we rely upon nobody altering the ->index
+ * of this (pinned-by-us) page.
+ */
+ index = page->index;
+ if (index > next)
+ next = index;
next++;
+ if (lock_failed)
+ continue;
+
if (PageDirty(page) || PageWriteback(page))
goto unlock;
if (page_mapped(page))
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c0504f1e34eb..35f8553f893a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -257,6 +257,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
}
/* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+ struct vm_struct *tmp;
+
+ for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+ if (tmp->addr == addr)
+ break;
+ }
+
+ return tmp;
+}
+
+/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
struct vm_struct **p, *tmp;
@@ -498,11 +511,33 @@ EXPORT_SYMBOL(__vmalloc);
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
/**
+ * vmalloc_user - allocate virtually contiguous memory which has
+ * been zeroed so it can be mapped to userspace without
+ * leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+ struct vm_struct *area;
+ void *ret;
+
+ ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ write_lock(&vmlist_lock);
+ area = __find_vm_area(ret);
+ area->flags |= VM_USERMAP;
+ write_unlock(&vmlist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+/**
* vmalloc_node - allocate memory on a specific node
*
* @size: allocation size
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
*/
void *vmalloc_node(unsigned long size, int node)
{
- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
}
EXPORT_SYMBOL(vmalloc_32);
+/**
+ * vmalloc_32_user - allocate virtually contiguous memory (32bit
+ * addressable) which is zeroed so it can be
+ * mapped to userspace without leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+ struct vm_struct *area;
+ void *ret;
+
+ ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ write_lock(&vmlist_lock);
+ area = __find_vm_area(ret);
+ area->flags |= VM_USERMAP;
+ write_unlock(&vmlist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
long vread(char *buf, char *addr, unsigned long count)
{
struct vm_struct *tmp;
@@ -630,3 +687,64 @@ finished:
read_unlock(&vmlist_lock);
return buf - buf_start;
}
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ *
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ * @returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * those criteria aren't met.
+ *
+ * Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff)
+{
+ struct vm_struct *area;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int ret;
+
+ if ((PAGE_SIZE-1) & (unsigned long)addr)
+ return -EINVAL;
+
+ read_lock(&vmlist_lock);
+ area = __find_vm_area(addr);
+ if (!area)
+ goto out_einval_locked;
+
+ if (!(area->flags & VM_USERMAP))
+ goto out_einval_locked;
+
+ if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+ goto out_einval_locked;
+ read_unlock(&vmlist_lock);
+
+ addr += pgoff << PAGE_SHIFT;
+ do {
+ struct page *page = vmalloc_to_page(addr);
+ ret = vm_insert_page(vma, uaddr, page);
+ if (ret)
+ return ret;
+
+ uaddr += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+
+ /* Prevent "things" like memory migration? VM_flags need a cleanup... */
+ vma->vm_flags |= VM_RESERVED;
+
+ return ret;
+
+out_einval_locked:
+ read_unlock(&vmlist_lock);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
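remap_vmalloc_range() above first checks that the requested window (the vma size plus the page offset) fits inside the vmalloc area once its guard page is excluded, and then inserts the backing pages one page at a time with vm_insert_page(). A toy version of the size check and the page-by-page walk, with invented constants:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12
#define TOY_PAGE_SIZE  (1UL << TOY_PAGE_SHIFT)

/*
 * Check that a mapping of 'usize' bytes starting 'pgoff' pages into an
 * area of 'area_size' bytes (which includes one guard page) fits.
 */
static bool toy_range_fits(unsigned long usize, unsigned long pgoff,
                           unsigned long area_size)
{
        return usize + (pgoff << TOY_PAGE_SHIFT) <= area_size - TOY_PAGE_SIZE;
}

int main(void)
{
        /* 4 usable pages plus 1 guard page in the area. */
        unsigned long area_size = 5 * TOY_PAGE_SIZE;

        assert(toy_range_fits(4 * TOY_PAGE_SIZE, 0, area_size));
        assert(!toy_range_fits(4 * TOY_PAGE_SIZE, 1, area_size));

        /* The mapping itself proceeds one page at a time. */
        for (unsigned long off = 0; off < 3 * TOY_PAGE_SIZE; off += TOY_PAGE_SIZE)
                printf("would insert page at user offset 0x%lx\n", off);
        return 0;
}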
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 440a733fe2e9..72babac71dea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -61,6 +61,8 @@ struct scan_control {
* In this context, it doesn't matter that we scan the
* whole list at once. */
int swap_cluster_max;
+
+ int swappiness;
};
/*
@@ -108,7 +110,7 @@ struct shrinker {
* From 0 .. 100. Higher means more swappy.
*/
int vm_swappiness = 60;
-static long total_memory;
+long vm_total_pages; /* The total number of pages which the VM controls */
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
@@ -288,11 +290,23 @@ static void handle_write_error(struct address_space *mapping,
unlock_page(page);
}
+/* possible outcome of pageout() */
+typedef enum {
+ /* failed to write page out, page is locked */
+ PAGE_KEEP,
+ /* move page to the active list, page is locked */
+ PAGE_ACTIVATE,
+ /* page has been sent to the disk successfully, page is unlocked */
+ PAGE_SUCCESS,
+ /* page is clean and locked */
+ PAGE_CLEAN,
+} pageout_t;
+
/*
* pageout is called by shrink_page_list() for each dirty page.
* Calls ->writepage().
*/
-pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping)
{
/*
* If the page is dirty, only perform writeback if that write
@@ -337,6 +351,8 @@ pageout_t pageout(struct page *page, struct address_space *mapping)
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
.nonblocking = 1,
.for_reclaim = 1,
};
@@ -727,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* how much memory
* is mapped.
*/
- mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+ mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
/*
* Now decide how much we really want to unmap some pages. The
@@ -741,7 +757,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* A 100% value of vm_swappiness overrides this algorithm
* altogether.
*/
- swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+ swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
/*
* Now use this metric to decide whether to start moving mapped
@@ -957,6 +973,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
.may_writepage = !laptop_mode,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.may_swap = 1,
+ .swappiness = vm_swappiness,
};
inc_page_state(allocstall);
@@ -1021,10 +1038,6 @@ out:
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at pages_high.
*
- * If `nr_pages' is non-zero then it is the number of pages which are to be
- * reclaimed, regardless of the zone occupancies. This is a software suspend
- * special.
- *
* Returns the number of pages which were actually freed.
*
* There is special handling here for zones which are full of pinned pages.
@@ -1042,10 +1055,8 @@ out:
* the page allocator fallback scheme to ensure that aging of pages is balanced
* across the zones.
*/
-static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
- int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
- unsigned long to_free = nr_pages;
int all_zones_ok;
int priority;
int i;
@@ -1055,7 +1066,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_swap = 1,
- .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
+ .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .swappiness = vm_swappiness,
};
loop_again:
@@ -1082,31 +1094,26 @@ loop_again:
all_zones_ok = 1;
- if (nr_pages == 0) {
- /*
- * Scan in the highmem->dma direction for the highest
- * zone which needs scanning
- */
- for (i = pgdat->nr_zones - 1; i >= 0; i--) {
- struct zone *zone = pgdat->node_zones + i;
+ /*
+ * Scan in the highmem->dma direction for the highest
+ * zone which needs scanning
+ */
+ for (i = pgdat->nr_zones - 1; i >= 0; i--) {
+ struct zone *zone = pgdat->node_zones + i;
- if (!populated_zone(zone))
- continue;
+ if (!populated_zone(zone))
+ continue;
- if (zone->all_unreclaimable &&
- priority != DEF_PRIORITY)
- continue;
+ if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ continue;
- if (!zone_watermark_ok(zone, order,
- zone->pages_high, 0, 0)) {
- end_zone = i;
- goto scan;
- }
+ if (!zone_watermark_ok(zone, order, zone->pages_high,
+ 0, 0)) {
+ end_zone = i;
+ goto scan;
}
- goto out;
- } else {
- end_zone = pgdat->nr_zones - 1;
}
+ goto out;
scan:
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
@@ -1133,11 +1140,9 @@ scan:
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue;
- if (nr_pages == 0) { /* Not software suspend */
- if (!zone_watermark_ok(zone, order,
- zone->pages_high, end_zone, 0))
- all_zones_ok = 0;
- }
+ if (!zone_watermark_ok(zone, order, zone->pages_high,
+ end_zone, 0))
+ all_zones_ok = 0;
zone->temp_priority = priority;
if (zone->prev_priority > priority)
zone->prev_priority = priority;
@@ -1162,8 +1167,6 @@ scan:
total_scanned > nr_reclaimed + nr_reclaimed / 2)
sc.may_writepage = 1;
}
- if (nr_pages && to_free > nr_reclaimed)
- continue; /* swsusp: need to do more work */
if (all_zones_ok)
break; /* kswapd: all done */
/*
@@ -1179,7 +1182,7 @@ scan:
* matches the direct reclaim path behaviour in terms of impact
* on zone->*_priority.
*/
- if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
+ if (nr_reclaimed >= SWAP_CLUSTER_MAX)
break;
}
out:
@@ -1261,7 +1264,7 @@ static int kswapd(void *p)
}
finish_wait(&pgdat->kswapd_wait, &wait);
- balance_pgdat(pgdat, 0, order);
+ balance_pgdat(pgdat, order);
}
return 0;
}
@@ -1290,35 +1293,154 @@ void wakeup_kswapd(struct zone *zone, int order)
#ifdef CONFIG_PM
/*
- * Try to free `nr_pages' of memory, system-wide. Returns the number of freed
- * pages.
+ * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
+ * from LRU lists system-wide, for given pass and priority, and returns the
+ * number of reclaimed pages
+ *
+ * For pass > 3 we also try to shrink the LRU lists that contain a few pages
+ */
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+ int pass, struct scan_control *sc)
+{
+ struct zone *zone;
+ unsigned long nr_to_scan, ret = 0;
+
+ for_each_zone(zone) {
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+ continue;
+
+ /* For pass = 0 we don't shrink the active list */
+ if (pass > 0) {
+ zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+ if (zone->nr_scan_active >= nr_pages || pass > 3) {
+ zone->nr_scan_active = 0;
+ nr_to_scan = min(nr_pages, zone->nr_active);
+ shrink_active_list(nr_to_scan, zone, sc);
+ }
+ }
+
+ zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+ if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
+ zone->nr_scan_inactive = 0;
+ nr_to_scan = min(nr_pages, zone->nr_inactive);
+ ret += shrink_inactive_list(nr_to_scan, zone, sc);
+ if (ret >= nr_pages)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * freed pages.
+ *
+ * Rather than trying to age LRUs the aim is to preserve the overall
+ * LRU order by reclaiming preferentially
+ * inactive > active > active referenced > active mapped
*/
unsigned long shrink_all_memory(unsigned long nr_pages)
{
- pg_data_t *pgdat;
- unsigned long nr_to_free = nr_pages;
+ unsigned long lru_pages, nr_slab;
unsigned long ret = 0;
- unsigned retry = 2;
- struct reclaim_state reclaim_state = {
- .reclaimed_slab = 0,
+ int pass;
+ struct reclaim_state reclaim_state;
+ struct zone *zone;
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .may_swap = 0,
+ .swap_cluster_max = nr_pages,
+ .may_writepage = 1,
+ .swappiness = vm_swappiness,
};
current->reclaim_state = &reclaim_state;
-repeat:
- for_each_online_pgdat(pgdat) {
- unsigned long freed;
- freed = balance_pgdat(pgdat, nr_to_free, 0);
- ret += freed;
- nr_to_free -= freed;
- if ((long)nr_to_free <= 0)
+ lru_pages = 0;
+ for_each_zone(zone)
+ lru_pages += zone->nr_active + zone->nr_inactive;
+
+ nr_slab = read_page_state(nr_slab);
+ /* If slab caches are huge, it's better to hit them first */
+ while (nr_slab >= lru_pages) {
+ reclaim_state.reclaimed_slab = 0;
+ shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+ if (!reclaim_state.reclaimed_slab)
break;
+
+ ret += reclaim_state.reclaimed_slab;
+ if (ret >= nr_pages)
+ goto out;
+
+ nr_slab -= reclaim_state.reclaimed_slab;
}
- if (retry-- && ret < nr_pages) {
- blk_congestion_wait(WRITE, HZ/5);
- goto repeat;
+
+ /*
+ * We try to shrink LRUs in 5 passes:
+ * 0 = Reclaim from inactive_list only
+ * 1 = Reclaim from active list but don't reclaim mapped
+ * 2 = 2nd pass of type 1
+ * 3 = Reclaim mapped (normal reclaim)
+ * 4 = 2nd pass of type 3
+ */
+ for (pass = 0; pass < 5; pass++) {
+ int prio;
+
+ /* Needed for shrinking slab caches later on */
+ if (!lru_pages)
+ for_each_zone(zone) {
+ lru_pages += zone->nr_active;
+ lru_pages += zone->nr_inactive;
+ }
+
+ /* Force reclaiming mapped pages in the passes #3 and #4 */
+ if (pass > 2) {
+ sc.may_swap = 1;
+ sc.swappiness = 100;
+ }
+
+ for (prio = DEF_PRIORITY; prio >= 0; prio--) {
+ unsigned long nr_to_scan = nr_pages - ret;
+
+ sc.nr_mapped = read_page_state(nr_mapped);
+ sc.nr_scanned = 0;
+
+ ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
+ if (ret >= nr_pages)
+ goto out;
+
+ reclaim_state.reclaimed_slab = 0;
+ shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+ ret += reclaim_state.reclaimed_slab;
+ if (ret >= nr_pages)
+ goto out;
+
+ if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
+ blk_congestion_wait(WRITE, HZ / 10);
+ }
+
+ lru_pages = 0;
}
+
+ /*
+ * If ret = 0, we could not shrink LRUs, but there may be something
+ * in slab caches
+ */
+ if (!ret)
+ do {
+ reclaim_state.reclaimed_slab = 0;
+ shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+ ret += reclaim_state.reclaimed_slab;
+ } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+
+out:
current->reclaim_state = NULL;
+
return ret;
}
#endif
@@ -1360,7 +1482,6 @@ static int __init kswapd_init(void)
pgdat->kswapd = find_task_by_pid(pid);
read_unlock(&tasklist_lock);
}
- total_memory = nr_free_pagecache_pages();
hotcpu_notifier(cpu_callback, 0);
return 0;
}
@@ -1416,6 +1537,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
+ .swappiness = vm_swappiness,
};
disable_swap_token();
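Two related vmscan changes sit above: scan_control grows a per-caller swappiness field, and shrink_all_memory() forces it to 100 in passes 3 and 4 so mapped pages get reclaimed as well. The heuristic this feeds is swap_tendency = mapped_ratio / 2 + distress + swappiness, where values of 100 or more make the scanner unmap mapped pages. A tiny worked calculation of just that formula:

#include <stdio.h>

/* The reclaim heuristic used by shrink_active_list(), in isolation. */
static int toy_swap_tendency(int mapped_ratio, int distress, int swappiness)
{
        return mapped_ratio / 2 + distress + swappiness;
}

int main(void)
{
        /* Normal reclaim: default swappiness of 60. */
        printf("kswapd-style: %d\n", toy_swap_tendency(40, 0, 60));   /* 80  */

        /* Suspend-time passes 3 and 4: swappiness forced to 100. */
        printf("shrink_all_memory pass 3: %d\n",
               toy_swap_tendency(40, 0, 100));                        /* 120 */

        /* Values of 100 or more mean mapped pages are reclaimed too. */
        return 0;
}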
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 0dca027ceb80..8be9f2123e54 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -34,8 +34,8 @@ static inline unsigned packet_length(const struct sk_buff *skb)
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
- /* drop mtu oversized packets except tso */
- if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
+ /* drop mtu oversized packets except gso */
+ if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
kfree_skb(skb);
else {
#ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index fdec773f5b52..07956ecf545e 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -376,15 +376,20 @@ void br_features_recompute(struct net_bridge *br)
features = br->feature_mask & ~NETIF_F_ALL_CSUM;
list_for_each_entry(p, &br->port_list, list) {
- if (checksum & NETIF_F_NO_CSUM &&
- !(p->dev->features & NETIF_F_NO_CSUM))
+ unsigned long feature = p->dev->features;
+
+ if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
- if (checksum & NETIF_F_HW_CSUM &&
- !(p->dev->features & NETIF_F_HW_CSUM))
+ if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
- if (!(p->dev->features & NETIF_F_IP_CSUM))
+ if (!(feature & NETIF_F_IP_CSUM))
checksum = 0;
- features &= p->dev->features;
+
+ if (feature & NETIF_F_GSO)
+ feature |= NETIF_F_TSO;
+ feature |= NETIF_F_GSO;
+
+ features &= feature;
}
br->dev->features = features | checksum | NETIF_F_LLTX;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3e41f9d6d51c..8298a5179aef 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP) &&
skb->len > skb->dev->mtu &&
- !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
+ !skb_shinfo(skb)->gso_size)
return ip_fragment(skb, br_dev_queue_push_xmit);
else
return br_dev_queue_push_xmit(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index ab39fe17cb58..ea2469398bd5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -116,6 +116,7 @@
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
+#include <linux/err.h>
/*
* The list of packet types we will receive (as opposed to discard)
@@ -1048,7 +1049,7 @@ static inline void net_timestamp(struct sk_buff *skb)
* taps currently in use.
*/
-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
@@ -1186,6 +1187,40 @@ out:
return ret;
}
+/**
+ * skb_gso_segment - Perform segmentation on skb.
+ * @skb: buffer to segment
+ * @sg: whether scatter-gather is supported on the target.
+ *
+ * This function segments the given skb and returns a list of segments.
+ */
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg)
+{
+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+ struct packet_type *ptype;
+ int type = skb->protocol;
+
+ BUG_ON(skb_shinfo(skb)->frag_list);
+ BUG_ON(skb->ip_summed != CHECKSUM_HW);
+
+ skb->mac.raw = skb->data;
+ skb->mac_len = skb->nh.raw - skb->data;
+ __skb_pull(skb, skb->mac_len);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
+ if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+ segs = ptype->gso_segment(skb, sg);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return segs;
+}
+
+EXPORT_SYMBOL(skb_gso_segment);
+
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
@@ -1222,6 +1257,86 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
#define illegal_highdma(dev, skb) (0)
#endif
+struct dev_gso_cb {
+ void (*destructor)(struct sk_buff *skb);
+};
+
+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
+
+static void dev_gso_skb_destructor(struct sk_buff *skb)
+{
+ struct dev_gso_cb *cb;
+
+ do {
+ struct sk_buff *nskb = skb->next;
+
+ skb->next = nskb->next;
+ nskb->next = NULL;
+ kfree_skb(nskb);
+ } while (skb->next);
+
+ cb = DEV_GSO_CB(skb);
+ if (cb->destructor)
+ cb->destructor(skb);
+}
+
+/**
+ * dev_gso_segment - Perform emulated hardware segmentation on skb.
+ * @skb: buffer to segment
+ *
+ * This function segments the given skb and stores the list of segments
+ * in skb->next.
+ */
+static int dev_gso_segment(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct sk_buff *segs;
+
+ segs = skb_gso_segment(skb, dev->features & NETIF_F_SG &&
+ !illegal_highdma(dev, skb));
+ if (unlikely(IS_ERR(segs)))
+ return PTR_ERR(segs);
+
+ skb->next = segs;
+ DEV_GSO_CB(skb)->destructor = skb->destructor;
+ skb->destructor = dev_gso_skb_destructor;
+
+ return 0;
+}
+
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ if (likely(!skb->next)) {
+ if (netdev_nit)
+ dev_queue_xmit_nit(skb, dev);
+
+ if (!netif_needs_gso(dev, skb))
+ return dev->hard_start_xmit(skb, dev);
+
+ if (unlikely(dev_gso_segment(skb)))
+ goto out_kfree_skb;
+ }
+
+ do {
+ struct sk_buff *nskb = skb->next;
+ int rc;
+
+ skb->next = nskb->next;
+ nskb->next = NULL;
+ rc = dev->hard_start_xmit(nskb, dev);
+ if (unlikely(rc)) {
+ skb->next = nskb;
+ return rc;
+ }
+ } while (skb->next);
+
+ skb->destructor = DEV_GSO_CB(skb)->destructor;
+
+out_kfree_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
#define HARD_TX_LOCK(dev, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
netif_tx_lock(dev); \
@@ -1266,6 +1381,10 @@ int dev_queue_xmit(struct sk_buff *skb)
struct Qdisc *q;
int rc = -ENOMEM;
+ /* GSO will handle the following emulations directly. */
+ if (netif_needs_gso(dev, skb))
+ goto gso;
+
if (skb_shinfo(skb)->frag_list &&
!(dev->features & NETIF_F_FRAGLIST) &&
__skb_linearize(skb))
@@ -1290,12 +1409,13 @@ int dev_queue_xmit(struct sk_buff *skb)
if (skb_checksum_help(skb, 0))
goto out_kfree_skb;
+gso:
spin_lock_prefetch(&dev->queue_lock);
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
- local_bh_disable();
+ rcu_read_lock_bh();
/* Updates of qdisc are serialized by queue_lock.
* The struct Qdisc which is pointed to by qdisc is now a
@@ -1346,11 +1466,8 @@ int dev_queue_xmit(struct sk_buff *skb)
HARD_TX_LOCK(dev, cpu);
if (!netif_queue_stopped(dev)) {
- if (netdev_nit)
- dev_queue_xmit_nit(skb, dev);
-
rc = 0;
- if (!dev->hard_start_xmit(skb, dev)) {
+ if (!dev_hard_start_xmit(skb, dev)) {
HARD_TX_UNLOCK(dev);
goto out;
}
@@ -1369,13 +1486,13 @@ int dev_queue_xmit(struct sk_buff *skb)
}
rc = -ENETDOWN;
- local_bh_enable();
+ rcu_read_unlock_bh();
out_kfree_skb:
kfree_skb(skb);
return rc;
out:
- local_bh_enable();
+ rcu_read_unlock_bh();
return rc;
}
@@ -2980,7 +3097,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
- struct list_head list = LIST_HEAD_INIT(list);
+ struct list_head list;
/* Need to guard against multiple cpu's getting out of order. */
mutex_lock(&net_todo_run_mutex);
@@ -2995,9 +3112,9 @@ void netdev_run_todo(void)
/* Snapshot list, allow later requests */
spin_lock(&net_todo_list_lock);
- list_splice_init(&net_todo_list, &list);
+ list_replace_init(&net_todo_list, &list);
spin_unlock(&net_todo_list_lock);
-
+
while (!list_empty(&list)) {
struct net_device *dev
= list_entry(list.next, struct net_device, todo_list);
@@ -3301,8 +3418,8 @@ static void net_dma_rebalance(void)
/**
* netdev_dma_event - event callback for the net_dma_client
* @client: should always be net_dma_client
- * @chan:
- * @event:
+ * @chan: DMA channel for the event
+ * @event: event type
*/
static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
enum dma_event event)
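The new dev_hard_start_xmit() walks the list of GSO segments chained through skb->next, detaching and transmitting one segment at a time, and re-links the untransmitted remainder if the driver rejects a segment so the queueing layer can retry later. A rough user-space sketch of only that list-walk/requeue pattern, with invented types (the real function also handles the non-GSO fast path and the destructor swap):

#include <stdio.h>

struct toy_skb {
        int id;
        struct toy_skb *next;
};

/* Pretend driver transmit: fail on a chosen segment to show the requeue. */
static int toy_xmit_one(struct toy_skb *skb, int fail_id)
{
        if (skb->id == fail_id)
                return -1;
        printf("sent segment %d\n", skb->id);
        return 0;
}

/* Walk skb->next, sending segments; on error, put the rest back. */
static int toy_xmit_list(struct toy_skb *skb, int fail_id)
{
        while (skb->next) {
                struct toy_skb *nskb = skb->next;

                skb->next = nskb->next;         /* detach the segment */
                nskb->next = NULL;
                if (toy_xmit_one(nskb, fail_id)) {
                        skb->next = nskb;       /* re-link for a retry */
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct toy_skb s3 = { 3, NULL }, s2 = { 2, &s3 }, s1 = { 1, &s2 };
        struct toy_skb head = { 0, &s1 };

        toy_xmit_list(&head, 3);        /* sends 1 and 2, requeues 3 */
        return 0;
}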
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 33ce7ed6afc6..27ce1683caf5 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -614,6 +614,29 @@ static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
return dev->ethtool_ops->set_ufo(dev, edata.data);
}
+static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GGSO };
+
+ edata.data = dev->features & NETIF_F_GSO;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if (edata.data)
+ dev->features |= NETIF_F_GSO;
+ else
+ dev->features &= ~NETIF_F_GSO;
+ return 0;
+}
+
static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
{
struct ethtool_test test;
@@ -905,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr)
case ETHTOOL_SUFO:
rc = ethtool_set_ufo(dev, useraddr);
break;
+ case ETHTOOL_GGSO:
+ rc = ethtool_get_gso(dev, useraddr);
+ break;
+ case ETHTOOL_SGSO:
+ rc = ethtool_set_gso(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 646937cc2d84..0f37266411b5 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -91,11 +91,10 @@ static void rfc2863_policy(struct net_device *dev)
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
- LIST_HEAD(head);
- struct list_head *n, *next;
+ struct list_head head, *n, *next;
spin_lock_irq(&lweventlist_lock);
- list_splice_init(&lweventlist, &head);
+ list_replace_init(&lweventlist, &head);
spin_unlock_irq(&lweventlist_lock);
list_for_each_safe(n, next, &head) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bb7210f4005e..8e5044ba3ab6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -172,9 +172,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo = skb_shinfo(skb);
atomic_set(&shinfo->dataref, 1);
shinfo->nr_frags = 0;
- shinfo->tso_size = 0;
- shinfo->tso_segs = 0;
- shinfo->ufo_size = 0;
+ shinfo->gso_size = 0;
+ shinfo->gso_segs = 0;
+ shinfo->gso_type = 0;
shinfo->ip6_frag_id = 0;
shinfo->frag_list = NULL;
@@ -238,8 +238,9 @@ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
atomic_set(&(skb_shinfo(skb)->dataref), 1);
skb_shinfo(skb)->nr_frags = 0;
- skb_shinfo(skb)->tso_size = 0;
- skb_shinfo(skb)->tso_segs = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_segs = 0;
+ skb_shinfo(skb)->gso_type = 0;
skb_shinfo(skb)->frag_list = NULL;
out:
return skb;
@@ -528,8 +529,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif
skb_copy_secmark(new, old);
atomic_set(&new->users, 1);
- skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
- skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
+ skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
+ skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
+ skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
/**
@@ -781,24 +783,40 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
* filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire.
*
- * May return NULL in out of memory cases.
+ * May return error in out of memory cases. The skb is freed on error.
*/
-struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
+int skb_pad(struct sk_buff *skb, int pad)
{
- struct sk_buff *nskb;
+ int err;
+ int ntail;
/* If the skbuff is non linear tailroom is always zero.. */
- if (skb_tailroom(skb) >= pad) {
+ if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
memset(skb->data+skb->len, 0, pad);
- return skb;
+ return 0;
}
-
- nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
+
+ ntail = skb->data_len + pad - (skb->end - skb->tail);
+ if (likely(skb_cloned(skb) || ntail > 0)) {
+ err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
+ if (unlikely(err))
+ goto free_skb;
+ }
+
+ /* FIXME: The use of this function with non-linear skb's really needs
+ * to be audited.
+ */
+ err = skb_linearize(skb);
+ if (unlikely(err))
+ goto free_skb;
+
+ memset(skb->data + skb->len, 0, pad);
+ return 0;
+
+free_skb:
kfree_skb(skb);
- if (nskb)
- memset(nskb->data+nskb->len, 0, pad);
- return nskb;
+ return err;
}
/* Trims skb to length len. It can change skb pointers.
@@ -1824,6 +1842,132 @@ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+/**
+ * skb_segment - Perform protocol segmentation on skb.
+ * @skb: buffer to segment
+ * @sg: whether scatter-gather can be used for generated segments
+ *
+ * This function performs segmentation on the given skb. It returns
+ * the segment at the given position. It returns NULL if there are
+ * no more segments to generate, or when an error is encountered.
+ */
+struct sk_buff *skb_segment(struct sk_buff *skb, int sg)
+{
+ struct sk_buff *segs = NULL;
+ struct sk_buff *tail = NULL;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int doffset = skb->data - skb->mac.raw;
+ unsigned int offset = doffset;
+ unsigned int headroom;
+ unsigned int len;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ int err = -ENOMEM;
+ int i = 0;
+ int pos;
+
+ __skb_push(skb, doffset);
+ headroom = skb_headroom(skb);
+ pos = skb_headlen(skb);
+
+ do {
+ struct sk_buff *nskb;
+ skb_frag_t *frag;
+ int hsize, nsize;
+ int k;
+ int size;
+
+ len = skb->len - offset;
+ if (len > mss)
+ len = mss;
+
+ hsize = skb_headlen(skb) - offset;
+ if (hsize < 0)
+ hsize = 0;
+ nsize = hsize + doffset;
+ if (nsize > len + doffset || !sg)
+ nsize = len + doffset;
+
+ nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
+ if (unlikely(!nskb))
+ goto err;
+
+ if (segs)
+ tail->next = nskb;
+ else
+ segs = nskb;
+ tail = nskb;
+
+ nskb->dev = skb->dev;
+ nskb->priority = skb->priority;
+ nskb->protocol = skb->protocol;
+ nskb->dst = dst_clone(skb->dst);
+ memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+ nskb->pkt_type = skb->pkt_type;
+ nskb->mac_len = skb->mac_len;
+
+ skb_reserve(nskb, headroom);
+ nskb->mac.raw = nskb->data;
+ nskb->nh.raw = nskb->data + skb->mac_len;
+ nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
+ memcpy(skb_put(nskb, doffset), skb->data, doffset);
+
+ if (!sg) {
+ nskb->csum = skb_copy_and_csum_bits(skb, offset,
+ skb_put(nskb, len),
+ len, 0);
+ continue;
+ }
+
+ frag = skb_shinfo(nskb)->frags;
+ k = 0;
+
+ nskb->ip_summed = CHECKSUM_HW;
+ nskb->csum = skb->csum;
+ memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
+
+ while (pos < offset + len) {
+ BUG_ON(i >= nfrags);
+
+ *frag = skb_shinfo(skb)->frags[i];
+ get_page(frag->page);
+ size = frag->size;
+
+ if (pos < offset) {
+ frag->page_offset += offset - pos;
+ frag->size -= offset - pos;
+ }
+
+ k++;
+
+ if (pos + size <= offset + len) {
+ i++;
+ pos += size;
+ } else {
+ frag->size -= pos + size - (offset + len);
+ break;
+ }
+
+ frag++;
+ }
+
+ skb_shinfo(nskb)->nr_frags = k;
+ nskb->data_len = len - hsize;
+ nskb->len += nskb->data_len;
+ nskb->truesize += nskb->data_len;
+ } while ((offset += len) < skb->len);
+
+ return segs;
+
+err:
+ while ((skb = segs)) {
+ segs = skb->next;
+ kfree_skb(skb);
+ }
+ return ERR_PTR(err);
+}
+
+EXPORT_SYMBOL_GPL(skb_segment);
+
void __init skb_init(void)
{
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
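skb_segment() above slices the payload into gso_size-sized pieces and puts a copy of the original headers in front of each piece, either by copying (the !sg path) or by sharing page fragments. A toy linear-buffer version of just the chunking, with invented names and no fragment handling:

#include <stdio.h>
#include <string.h>

/* Split 'payload' into chunks of at most 'mss' bytes, each behind a copy
 * of 'hdr': the linear-only core of what skb_segment() does. */
static void toy_segment(const char *hdr, size_t hlen,
                        const char *payload, size_t plen, size_t mss)
{
        size_t offset = 0;

        while (offset < plen) {
                size_t len = plen - offset;
                char seg[128];

                if (len > mss)
                        len = mss;
                memcpy(seg, hdr, hlen);                    /* replicate headers */
                memcpy(seg + hlen, payload + offset, len); /* one mss of data  */
                printf("segment: %zu header bytes + %zu payload bytes\n",
                       hlen, len);
                offset += len;
        }
}

int main(void)
{
        char hdr[14] = "HDRHDRHDRHDRH";
        char payload[100];

        memset(payload, 'x', sizeof(payload));
        toy_segment(hdr, sizeof(hdr), payload, sizeof(payload), 40);
        /* -> three segments: 40, 40 and 20 payload bytes */
        return 0;
}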
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 7e096ba8454f..859e3359fcda 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -26,7 +26,7 @@ config INET_DCCP_DIAG
config IP_DCCP_ACKVEC
depends on IP_DCCP
- def_bool N
+ bool
source "net/dccp/ccids/Kconfig"
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 22aa6199185b..0e65ff4e33fc 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -388,7 +388,7 @@ ieee80211softmac_wx_set_genie(struct net_device *dev,
memcpy(mac->wpa.IE, extra, wrqu->data.length);
dprintk(KERN_INFO PFX "generic IE set to ");
for (i=0;i<wrqu->data.length;i++)
- dprintk("%.2x", mac->wpa.IE[i]);
+ dprintk("%.2x", (u8)mac->wpa.IE[i]);
dprintk("\n");
mac->wpa.IElen = wrqu->data.length;
} else {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 0a277453526b..461216b47948 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -68,6 +68,7 @@
*/
#include <linux/config.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -1096,6 +1097,54 @@ int inet_sk_rebuild_header(struct sock *sk)
EXPORT_SYMBOL(inet_sk_rebuild_header);
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int sg)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct iphdr *iph;
+ struct net_protocol *ops;
+ int proto;
+ int ihl;
+ int id;
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ goto out;
+
+ iph = skb->nh.iph;
+ ihl = iph->ihl * 4;
+ if (ihl < sizeof(*iph))
+ goto out;
+
+ if (!pskb_may_pull(skb, ihl))
+ goto out;
+
+ skb->h.raw = __skb_pull(skb, ihl);
+ iph = skb->nh.iph;
+ id = ntohs(iph->id);
+ proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ segs = ERR_PTR(-EPROTONOSUPPORT);
+
+ rcu_read_lock();
+ ops = rcu_dereference(inet_protos[proto]);
+ if (ops && ops->gso_segment)
+ segs = ops->gso_segment(skb, sg);
+ rcu_read_unlock();
+
+ if (IS_ERR(segs))
+ goto out;
+
+ skb = segs;
+ do {
+ iph = skb->nh.iph;
+ iph->id = htons(id++);
+ iph->tot_len = htons(skb->len - skb->mac_len);
+ iph->check = 0;
+ iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
+ } while ((skb = skb->next));
+
+out:
+ return segs;
+}
+
#ifdef CONFIG_IP_MULTICAST
static struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
@@ -1105,6 +1154,7 @@ static struct net_protocol igmp_protocol = {
static struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
+ .gso_segment = tcp_tso_segment,
.no_policy = 1,
};
@@ -1150,6 +1200,7 @@ static int ipv4_proc_init(void);
static struct packet_type ip_packet_type = {
.type = __constant_htons(ETH_P_IP),
.func = ip_rcv,
+ .gso_segment = inet_gso_segment,
};
static int __init inet_init(void)
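inet_gso_segment() above defers to the transport protocol's gso_segment hook and then walks the returned list, giving every segment a consecutive IP ID, its own tot_len and a recomputed header checksum. A standalone sketch of that per-header fix-up over a raw byte buffer; ip_csum() is a local RFC 1071 helper and the header values are invented, nothing here is a kernel interface:

#include <stdint.h>
#include <stdio.h>

/* One's-complement checksum over an IPv4 header, RFC 1071 style. */
static uint16_t ip_csum(const uint8_t *hdr, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (hdr[i] << 8) | hdr[i + 1];
	if (len & 1)
		sum += hdr[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t hdr[20] = { 0x45 };     /* version 4, IHL 5, rest zeroed  */
	uint16_t id = 0x1234;           /* assumed ID of the super-packet */
	int seg;

	hdr[2] = 1500 >> 8;             /* assumed per-segment tot_len    */
	hdr[3] = 1500 & 0xff;

	for (seg = 0; seg < 3; seg++, id++) {
		uint16_t csum;

		hdr[4] = id >> 8;       /* consecutive ID per segment     */
		hdr[5] = id & 0xff;
		hdr[10] = hdr[11] = 0;  /* zero the checksum field first  */
		csum = ip_csum(hdr, sizeof(hdr));
		hdr[10] = csum >> 8;
		hdr[11] = csum & 0xff;
		printf("segment %d: id=0x%04x check=0x%04x\n", seg, id, csum);
	}
	return 0;
}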
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8538aac3d148..7624fd1d8f9f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -210,8 +210,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
return dst_output(skb);
}
#endif
- if (skb->len > dst_mtu(skb->dst) &&
- !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
+ if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
return ip_fragment(skb, ip_finish_output2);
else
return ip_finish_output2(skb);
@@ -362,7 +361,7 @@ packet_routed:
}
ip_select_ident_more(iph, &rt->u.dst, sk,
- (skb_shinfo(skb)->tso_segs ?: 1) - 1);
+ (skb_shinfo(skb)->gso_segs ?: 1) - 1);
/* Add an IP checksum. */
ip_send_check(iph);
@@ -744,7 +743,8 @@ static inline int ip_ufo_append_data(struct sock *sk,
(length - transhdrlen));
if (!err) {
/* specify the length of each IP datagram fragment*/
- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
+ skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
__skb_queue_tail(&sk->sk_write_queue, skb);
return 0;
@@ -1087,14 +1087,16 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
inet->cork.length += size;
if ((sk->sk_protocol == IPPROTO_UDP) &&
- (rt->u.dst.dev->features & NETIF_F_UFO))
- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
+ (rt->u.dst.dev->features & NETIF_F_UFO)) {
+ skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+ }
while (size > 0) {
int i;
- if (skb_shinfo(skb)->ufo_size)
+ if (skb_shinfo(skb)->gso_size)
len = size;
else {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 74998f250071..0e029c4e2903 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -258,6 +258,7 @@
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
+#include <linux/err.h>
#include <net/icmp.h>
#include <net/tcp.h>
@@ -571,7 +572,7 @@ new_segment:
skb->ip_summed = CHECKSUM_HW;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
- skb_shinfo(skb)->tso_segs = 0;
+ skb_shinfo(skb)->gso_segs = 0;
if (!copied)
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
@@ -818,7 +819,7 @@ new_segment:
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
- skb_shinfo(skb)->tso_segs = 0;
+ skb_shinfo(skb)->gso_segs = 0;
from += copy;
copied += copy;
@@ -2144,6 +2145,67 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct tcphdr *th;
+ unsigned thlen;
+ unsigned int seq;
+ unsigned int delta;
+ unsigned int oldlen;
+ unsigned int len;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ goto out;
+
+ th = skb->h.th;
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ goto out;
+
+ if (!pskb_may_pull(skb, thlen))
+ goto out;
+
+ oldlen = ~htonl(skb->len);
+ __skb_pull(skb, thlen);
+
+ segs = skb_segment(skb, sg);
+ if (IS_ERR(segs))
+ goto out;
+
+ len = skb_shinfo(skb)->gso_size;
+ delta = csum_add(oldlen, htonl(thlen + len));
+
+ skb = segs;
+ th = skb->h.th;
+ seq = ntohl(th->seq);
+
+ do {
+ th->fin = th->psh = 0;
+
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ th->check = csum_fold(csum_partial(
+ skb->h.raw, thlen, csum_add(skb->csum, delta)));
+ }
+
+ seq += len;
+ skb = skb->next;
+ th = skb->h.th;
+
+ th->seq = htonl(seq);
+ th->cwr = 0;
+ } while (skb->next);
+
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ delta = csum_add(oldlen, htonl(skb->tail - skb->h.raw));
+ th->check = csum_fold(csum_partial(
+ skb->h.raw, thlen, csum_add(skb->csum, delta)));
+ }
+
+out:
+ return segs;
+}
+
extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;
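The oldlen/delta arithmetic in tcp_tso_segment() is, in effect, the incremental one's-complement update of RFC 1624: rather than re-summing each segment's pseudo-header from scratch, the complement of the old total length cancels its contribution and the new per-segment TCP length is added in before the checksum is folded. A self-contained demonstration of that identity; csum16_add(), sum_words() and the sample words are local helpers, not the kernel's csum_* routines:

#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement addition with end-around carry. */
static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;

	return (uint16_t)((s & 0xffff) + (s >> 16));
}

/* Sum an array of 16-bit words, one's-complement style. */
static uint16_t sum_words(const uint16_t *w, int n)
{
	uint16_t s = 0;
	int i;

	for (i = 0; i < n; i++)
		s = csum16_add(s, w[i]);
	return s;
}

int main(void)
{
	/* Pretend these are pseudo-header words; the last one is a length. */
	uint16_t words[4] = { 0xc0a8, 0x0001, 0x0006, 4380 };
	uint16_t old_sum = sum_words(words, 4);
	uint16_t old_len = words[3];
	uint16_t new_len = 1460;

	/* Incremental update: cancel old_len, add new_len. */
	uint16_t delta = csum16_add((uint16_t)~old_len, new_len);
	uint16_t incremental = csum16_add(old_sum, delta);

	words[3] = new_len;
	printf("recomputed=0x%04x incremental=0x%04x\n",
	       sum_words(words, 4), incremental);
	return 0;
}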
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e08245bdda3a..94fe5b1f9dcb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1073,7 +1073,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
else
pkt_len = (end_seq -
TCP_SKB_CB(skb)->seq);
- if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+ if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
break;
pcount = tcp_skb_pcount(skb);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 07bb5a2b375e..bdd71db8bf90 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -515,15 +515,17 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
/* Avoid the costly divide in the normal
* non-TSO case.
*/
- skb_shinfo(skb)->tso_segs = 1;
- skb_shinfo(skb)->tso_size = 0;
+ skb_shinfo(skb)->gso_segs = 1;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_type = 0;
} else {
unsigned int factor;
factor = skb->len + (mss_now - 1);
factor /= mss_now;
- skb_shinfo(skb)->tso_segs = factor;
- skb_shinfo(skb)->tso_size = mss_now;
+ skb_shinfo(skb)->gso_segs = factor;
+ skb_shinfo(skb)->gso_size = mss_now;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}
}
@@ -914,7 +916,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int
if (!tso_segs ||
(tso_segs > 1 &&
- skb_shinfo(skb)->tso_size != mss_now)) {
+ tcp_skb_mss(skb) != mss_now)) {
tcp_set_skb_tso_segs(sk, skb, mss_now);
tso_segs = tcp_skb_pcount(skb);
}
@@ -1724,8 +1726,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
if (!pskb_trim(skb, 0)) {
TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
- skb_shinfo(skb)->tso_segs = 1;
- skb_shinfo(skb)->tso_size = 0;
+ skb_shinfo(skb)->gso_segs = 1;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_type = 0;
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
}
@@ -1930,8 +1933,9 @@ void tcp_send_fin(struct sock *sk)
skb->csum = 0;
TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
TCP_SKB_CB(skb)->sacked = 0;
- skb_shinfo(skb)->tso_segs = 1;
- skb_shinfo(skb)->tso_size = 0;
+ skb_shinfo(skb)->gso_segs = 1;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_type = 0;
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
TCP_SKB_CB(skb)->seq = tp->write_seq;
@@ -1963,8 +1967,9 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
skb->csum = 0;
TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
TCP_SKB_CB(skb)->sacked = 0;
- skb_shinfo(skb)->tso_segs = 1;
- skb_shinfo(skb)->tso_size = 0;
+ skb_shinfo(skb)->gso_segs = 1;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_type = 0;
/* Send it off. */
TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
@@ -2047,8 +2052,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
TCP_SKB_CB(skb)->sacked = 0;
- skb_shinfo(skb)->tso_segs = 1;
- skb_shinfo(skb)->tso_size = 0;
+ skb_shinfo(skb)->gso_segs = 1;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_type = 0;
th->seq = htonl(TCP_SKB_CB(skb)->seq);
th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
@@ -2152,8 +2158,9 @@ int tcp_connect(struct sock *sk)
TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
TCP_ECN_send_syn(sk, tp, buff);
TCP_SKB_CB(buff)->sacked = 0;
- skb_shinfo(buff)->tso_segs = 1;
- skb_shinfo(buff)->tso_size = 0;
+ skb_shinfo(buff)->gso_segs = 1;
+ skb_shinfo(buff)->gso_size = 0;
+ skb_shinfo(buff)->gso_type = 0;
buff->csum = 0;
TCP_SKB_CB(buff)->seq = tp->write_seq++;
TCP_SKB_CB(buff)->end_seq = tp->write_seq;
@@ -2257,8 +2264,9 @@ void tcp_send_ack(struct sock *sk)
buff->csum = 0;
TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
TCP_SKB_CB(buff)->sacked = 0;
- skb_shinfo(buff)->tso_segs = 1;
- skb_shinfo(buff)->tso_size = 0;
+ skb_shinfo(buff)->gso_segs = 1;
+ skb_shinfo(buff)->gso_size = 0;
+ skb_shinfo(buff)->gso_type = 0;
/* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
@@ -2293,8 +2301,9 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
skb->csum = 0;
TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
TCP_SKB_CB(skb)->sacked = urgent;
- skb_shinfo(skb)->tso_segs = 1;
- skb_shinfo(skb)->tso_size = 0;
+ skb_shinfo(skb)->gso_segs = 1;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_type = 0;
/* Use a previous sequence. This should cause the other
* end to send an ack. Don't queue or clone SKB, just
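tcp_set_skb_tso_segs() now records the segment count and size in the renamed gso_segs/gso_size fields (plus a gso_type), keeping the common small-packet case free of the division: anything that fits in one MSS is a single segment, otherwise the count is ceil(len / mss). A tiny sketch of that rule with invented numbers:

#include <stdio.h>

/* Illustrative restatement of the "avoid the costly divide" logic:
 * one segment for small packets, ceil(len / mss) otherwise. */
static unsigned int segment_count(unsigned int len, unsigned int mss)
{
	if (len <= mss)
		return 1;
	return (len + mss - 1) / mss;
}

int main(void)
{
	printf("%u\n", segment_count(512, 1448));   /* 1 */
	printf("%u\n", segment_count(1448, 1448));  /* 1 */
	printf("%u\n", segment_count(4500, 1448));  /* 4 */
	return 0;
}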
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index ac9d91d4bb05..193363e22932 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -9,6 +9,8 @@
*/
#include <linux/compiler.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter_ipv4.h>
@@ -97,16 +99,10 @@ error_nolock:
goto out_exit;
}
-static int xfrm4_output_finish(struct sk_buff *skb)
+static int xfrm4_output_finish2(struct sk_buff *skb)
{
int err;
-#ifdef CONFIG_NETFILTER
- if (!skb->dst->xfrm) {
- IPCB(skb)->flags |= IPSKB_REROUTED;
- return dst_output(skb);
- }
-#endif
while (likely((err = xfrm4_output_one(skb)) == 0)) {
nf_reset(skb);
@@ -119,7 +115,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
return dst_output(skb);
err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
- skb->dst->dev, xfrm4_output_finish);
+ skb->dst->dev, xfrm4_output_finish2);
if (unlikely(err != 1))
break;
}
@@ -127,6 +123,48 @@ static int xfrm4_output_finish(struct sk_buff *skb)
return err;
}
+static int xfrm4_output_finish(struct sk_buff *skb)
+{
+ struct sk_buff *segs;
+
+#ifdef CONFIG_NETFILTER
+ if (!skb->dst->xfrm) {
+ IPCB(skb)->flags |= IPSKB_REROUTED;
+ return dst_output(skb);
+ }
+#endif
+
+ if (!skb_shinfo(skb)->gso_size)
+ return xfrm4_output_finish2(skb);
+
+ skb->protocol = htons(ETH_P_IP);
+ segs = skb_gso_segment(skb, 0);
+ kfree_skb(skb);
+ if (unlikely(IS_ERR(segs)))
+ return PTR_ERR(segs);
+
+ do {
+ struct sk_buff *nskb = segs->next;
+ int err;
+
+ segs->next = NULL;
+ err = xfrm4_output_finish2(segs);
+
+ if (unlikely(err)) {
+ while ((segs = nskb)) {
+ nskb = segs->next;
+ segs->next = NULL;
+ kfree_skb(segs);
+ }
+ return err;
+ }
+
+ segs = nskb;
+ } while (segs);
+
+ return 0;
+}
+
int xfrm4_output(struct sk_buff *skb)
{
return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
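The new xfrm4_output_finish() (and its IPv6 twin further down) uses one pattern for GSO packets: segment, then detach each segment from the list, push it through the real output routine, and on the first failure free everything still queued. A userspace sketch of that list walk; struct seg, emit() and the deliberate failure are illustrative stand-ins, not kernel code:

#include <stdio.h>
#include <stdlib.h>

struct seg {
	int id;
	struct seg *next;
};

/* Stand-in for the per-segment output routine; fails on id == 2. */
static int emit(struct seg *s)
{
	printf("emit segment %d\n", s->id);
	return s->id == 2 ? -1 : 0;
}

static int output_all(struct seg *segs)
{
	do {
		struct seg *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = emit(segs);
		free(segs);             /* the output step consumes it */

		if (err) {
			/* Free whatever is still queued and bail out. */
			while ((segs = nskb)) {
				nskb = segs->next;
				free(segs);
			}
			return err;
		}
		segs = nskb;
	} while (segs);
	return 0;
}

int main(void)
{
	struct seg *head = NULL, **tail = &head;
	int i;

	for (i = 0; i < 4; i++) {
		struct seg *s = malloc(sizeof(*s));

		s->id = i;
		s->next = NULL;
		*tail = s;
		tail = &s->next;
	}
	return output_all(head) ? 1 : 0;
}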
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c2c26fa0943d..4da664538f52 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -862,6 +862,8 @@ static int inline ipv6_saddr_label(const struct in6_addr *addr, int type)
* 2002::/16 2
* ::/96 3
* ::ffff:0:0/96 4
+ * fc00::/7 5
+ * 2001::/32 6
*/
if (type & IPV6_ADDR_LOOPBACK)
return 0;
@@ -869,8 +871,12 @@ static int inline ipv6_saddr_label(const struct in6_addr *addr, int type)
return 3;
else if (type & IPV6_ADDR_MAPPED)
return 4;
+ else if (addr->s6_addr32[0] == htonl(0x20010000))
+ return 6;
else if (addr->s6_addr16[0] == htons(0x2002))
return 2;
+ else if ((addr->s6_addr[0] & 0xfe) == 0xfc)
+ return 5;
return 1;
}
@@ -1069,6 +1075,9 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
if (hiscore.attrs & IPV6_SADDR_SCORE_PRIVACY)
continue;
}
+#else
+ if (hiscore.rule < 7)
+ hiscore.rule++;
#endif
/* Rule 8: Use longest matching prefix */
if (hiscore.rule < 8) {
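The two new rows in the source-address label table map the Teredo prefix 2001::/32 to label 6 and unique-local fc00::/7 to label 5, so the "matching label" rule of source-address selection prefers like-for-like addresses. A small sketch of just these prefix tests on the first four bytes of an address in network order, ignoring the loopback/compat/mapped cases handled earlier in the function (the helper and sample addresses are invented):

#include <stdint.h>
#include <stdio.h>

/* Illustrative restatement of the prefix-based rows of the label table. */
static int saddr_label(const uint8_t a[4])
{
	if (a[0] == 0x20 && a[1] == 0x01 && a[2] == 0x00 && a[3] == 0x00)
		return 6;               /* 2001::/32 (Teredo)  */
	if (a[0] == 0x20 && a[1] == 0x02)
		return 2;               /* 2002::/16 (6to4)    */
	if ((a[0] & 0xfe) == 0xfc)
		return 5;               /* fc00::/7 (ULA)      */
	return 1;                       /* everything else     */
}

int main(void)
{
	uint8_t teredo[4] = { 0x20, 0x01, 0x00, 0x00 };
	uint8_t sixto4[4] = { 0x20, 0x02, 0x12, 0x34 };
	uint8_t ula[4]    = { 0xfd, 0x00, 0xab, 0xcd };
	uint8_t global[4] = { 0x2a, 0x00, 0x14, 0x50 };

	printf("%d %d %d %d\n", saddr_label(teredo), saddr_label(sixto4),
	       saddr_label(ula), saddr_label(global));
	return 0;
}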
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d29620f4910e..abb94de33768 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -148,7 +148,7 @@ static int ip6_output2(struct sk_buff *skb)
int ip6_output(struct sk_buff *skb)
{
- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
+ if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
dst_allfrag(skb->dst))
return ip6_fragment(skb, ip6_output2);
else
@@ -833,8 +833,9 @@ static inline int ip6_ufo_append_data(struct sock *sk,
struct frag_hdr fhdr;
/* specify the length of each IP datagram fragment*/
- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
- sizeof(struct frag_hdr);
+ skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
+ sizeof(struct frag_hdr);
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
ipv6_select_ident(skb, &fhdr);
skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
__skb_queue_tail(&sk->sk_write_queue, skb);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 16e84254a252..48fccb1eca08 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -94,7 +94,7 @@ error_nolock:
goto out_exit;
}
-static int xfrm6_output_finish(struct sk_buff *skb)
+static int xfrm6_output_finish2(struct sk_buff *skb)
{
int err;
@@ -110,7 +110,7 @@ static int xfrm6_output_finish(struct sk_buff *skb)
return dst_output(skb);
err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
- skb->dst->dev, xfrm6_output_finish);
+ skb->dst->dev, xfrm6_output_finish2);
if (unlikely(err != 1))
break;
}
@@ -118,6 +118,41 @@ static int xfrm6_output_finish(struct sk_buff *skb)
return err;
}
+static int xfrm6_output_finish(struct sk_buff *skb)
+{
+ struct sk_buff *segs;
+
+ if (!skb_shinfo(skb)->gso_size)
+ return xfrm6_output_finish2(skb);
+
+ skb->protocol = htons(ETH_P_IP);
+ segs = skb_gso_segment(skb, 0);
+ kfree_skb(skb);
+ if (unlikely(IS_ERR(segs)))
+ return PTR_ERR(segs);
+
+ do {
+ struct sk_buff *nskb = segs->next;
+ int err;
+
+ segs->next = NULL;
+ err = xfrm6_output_finish2(segs);
+
+ if (unlikely(err)) {
+ while ((segs = nskb)) {
+ nskb = segs->next;
+ segs->next = NULL;
+ kfree_skb(segs);
+ }
+ return err;
+ }
+
+ segs = nskb;
+ } while (segs);
+
+ return 0;
+}
+
int xfrm6_output(struct sk_buff *skb)
{
return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d7aca8ef524a..74d4a1dceeec 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -96,8 +96,11 @@ static inline int qdisc_restart(struct net_device *dev)
struct sk_buff *skb;
/* Dequeue packet */
- if ((skb = q->dequeue(q)) != NULL) {
+ if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
unsigned nolock = (dev->features & NETIF_F_LLTX);
+
+ dev->gso_skb = NULL;
+
/*
* When the driver has LLTX set it does its own locking
* in start_xmit. No need to add additional overhead by
@@ -134,10 +137,8 @@ static inline int qdisc_restart(struct net_device *dev)
if (!netif_queue_stopped(dev)) {
int ret;
- if (netdev_nit)
- dev_queue_xmit_nit(skb, dev);
- ret = dev->hard_start_xmit(skb, dev);
+ ret = dev_hard_start_xmit(skb, dev);
if (ret == NETDEV_TX_OK) {
if (!nolock) {
netif_tx_unlock(dev);
@@ -171,7 +172,10 @@ static inline int qdisc_restart(struct net_device *dev)
*/
requeue:
- q->ops->requeue(skb, q);
+ if (skb->next)
+ dev->gso_skb = skb;
+ else
+ q->ops->requeue(skb, q);
netif_schedule(dev);
return 1;
}
@@ -181,9 +185,13 @@ requeue:
void __qdisc_run(struct net_device *dev)
{
+ if (unlikely(dev->qdisc == &noop_qdisc))
+ goto out;
+
while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
/* NOTHING */;
+out:
clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}
@@ -583,10 +591,17 @@ void dev_deactivate(struct net_device *dev)
dev_watchdog_down(dev);
- while (test_bit(__LINK_STATE_SCHED, &dev->state))
+ /* Wait for outstanding dev_queue_xmit calls. */
+ synchronize_rcu();
+
+ /* Wait for outstanding qdisc_run calls. */
+ while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
yield();
- spin_unlock_wait(&dev->_xmit_lock);
+ if (dev->gso_skb) {
+ kfree_skb(dev->gso_skb);
+ dev->gso_skb = NULL;
+ }
}
void dev_init_scheduler(struct net_device *dev)
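The qdisc changes give each device a one-slot dev->gso_skb stash: when the driver cannot take a GSO chain that still has segments left (skb->next set), the chain is parked there instead of being pushed back into the qdisc, and the next qdisc_restart() tries the stash before dequeuing anything else. A toy model of that decision; every type and helper below is invented for the sketch:

#include <stddef.h>
#include <stdio.h>

struct pkt {
	const char *name;
	struct pkt *next;       /* non-NULL: more of a GSO chain follows */
};

static struct pkt *stash;   /* models dev->gso_skb                    */
static struct pkt *queued;  /* models the qdisc, one packet deep      */

static struct pkt *pick_next(void)
{
	struct pkt *skb = stash;

	if (skb) {
		stash = NULL;   /* retry the stashed chain first */
		return skb;
	}
	skb = queued;
	queued = NULL;
	return skb;
}

static void put_back(struct pkt *skb)
{
	if (skb->next)
		stash = skb;    /* keep a GSO chain out of the qdisc */
	else
		queued = skb;   /* plain packet: ordinary requeue    */
}

int main(void)
{
	struct pkt seg2 = { "seg2", NULL };
	struct pkt chain = { "seg1", &seg2 };
	struct pkt plain = { "plain", NULL };

	queued = &plain;
	put_back(&chain);                    /* driver busy: park the chain */

	printf("%s\n", pick_next()->name);   /* seg1 (stash wins)           */
	printf("%s\n", pick_next()->name);   /* plain                       */
	return 0;
}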
diff --git a/net/socket.c b/net/socket.c
index 02948b622bd2..565f5e8d1191 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -335,10 +335,11 @@ static struct super_operations sockfs_ops = {
.statfs = simple_statfs,
};
-static struct super_block *sockfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sockfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
+ return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
+ mnt);
}
static struct vfsmount *sock_mnt __read_mostly;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index cc673dd8433f..8241fa726803 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -815,11 +815,11 @@ out:
return -ENOMEM;
}
-static struct super_block *
+static int
rpc_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, rpc_fill_super);
+ return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}
static struct file_system_type rpc_pipe_fs_type = {
diff --git a/security/dummy.c b/security/dummy.c
index 6de4a4a5eb13..c3c5493581e2 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -191,7 +191,7 @@ static int dummy_sb_kern_mount (struct super_block *sb, void *data)
return 0;
}
-static int dummy_sb_statfs (struct super_block *sb)
+static int dummy_sb_statfs (struct dentry *dentry)
{
return 0;
}
@@ -516,6 +516,11 @@ static int dummy_task_setnice (struct task_struct *p, int nice)
return 0;
}
+static int dummy_task_setioprio (struct task_struct *p, int ioprio)
+{
+ return 0;
+}
+
static int dummy_task_setrlimit (unsigned int resource, struct rlimit *new_rlim)
{
return 0;
@@ -532,6 +537,11 @@ static int dummy_task_getscheduler (struct task_struct *p)
return 0;
}
+static int dummy_task_movememory (struct task_struct *p)
+{
+ return 0;
+}
+
static int dummy_task_wait (struct task_struct *p)
{
return 0;
@@ -972,9 +982,11 @@ void security_fixup_ops (struct security_operations *ops)
set_to_dummy_if_null(ops, task_getsid);
set_to_dummy_if_null(ops, task_setgroups);
set_to_dummy_if_null(ops, task_setnice);
+ set_to_dummy_if_null(ops, task_setioprio);
set_to_dummy_if_null(ops, task_setrlimit);
set_to_dummy_if_null(ops, task_setscheduler);
set_to_dummy_if_null(ops, task_getscheduler);
+ set_to_dummy_if_null(ops, task_movememory);
set_to_dummy_if_null(ops, task_wait);
set_to_dummy_if_null(ops, task_kill);
set_to_dummy_if_null(ops, task_prctl);
diff --git a/security/inode.c b/security/inode.c
index 0f77b0223662..e6fc29ac8564 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -135,11 +135,11 @@ static int fill_super(struct super_block *sb, void *data, int silent)
return simple_fill_super(sb, SECURITYFS_MAGIC, files);
}
-static struct super_block *get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, fill_super);
+ return get_sb_single(fs_type, flags, data, fill_super, mnt);
}
static struct file_system_type fs_type = {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 524915dfda64..79c16e31c884 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1903,13 +1903,13 @@ static int selinux_sb_kern_mount(struct super_block *sb, void *data)
return superblock_has_perm(current, sb, FILESYSTEM__MOUNT, &ad);
}
-static int selinux_sb_statfs(struct super_block *sb)
+static int selinux_sb_statfs(struct dentry *dentry)
{
struct avc_audit_data ad;
AVC_AUDIT_DATA_INIT(&ad,FS);
- ad.u.fs.dentry = sb->s_root;
- return superblock_has_perm(current, sb, FILESYSTEM__GETATTR, &ad);
+ ad.u.fs.dentry = dentry->d_sb->s_root;
+ return superblock_has_perm(current, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
static int selinux_mount(char * dev_name,
@@ -2645,6 +2645,11 @@ static int selinux_task_setnice(struct task_struct *p, int nice)
return task_has_perm(current,p, PROCESS__SETSCHED);
}
+static int selinux_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return task_has_perm(current, p, PROCESS__SETSCHED);
+}
+
static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
{
struct rlimit *old_rlim = current->signal->rlim + resource;
@@ -2674,6 +2679,11 @@ static int selinux_task_getscheduler(struct task_struct *p)
return task_has_perm(current, p, PROCESS__GETSCHED);
}
+static int selinux_task_movememory(struct task_struct *p)
+{
+ return task_has_perm(current, p, PROCESS__SETSCHED);
+}
+
static int selinux_task_kill(struct task_struct *p, struct siginfo *info, int sig)
{
u32 perm;
@@ -4383,9 +4393,11 @@ static struct security_operations selinux_ops = {
.task_getsid = selinux_task_getsid,
.task_setgroups = selinux_task_setgroups,
.task_setnice = selinux_task_setnice,
+ .task_setioprio = selinux_task_setioprio,
.task_setrlimit = selinux_task_setrlimit,
.task_setscheduler = selinux_task_setscheduler,
.task_getscheduler = selinux_task_getscheduler,
+ .task_movememory = selinux_task_movememory,
.task_kill = selinux_task_kill,
.task_wait = selinux_task_wait,
.task_prctl = selinux_task_prctl,
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 2e73d3279f2d..7029bbc9bef8 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1345,10 +1345,11 @@ err:
goto out;
}
-static struct super_block *sel_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sel_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, sel_fill_super);
+ return get_sb_single(fs_type, flags, data, sel_fill_super, mnt);
}
static struct file_system_type sel_fs_type = {
diff --git a/sound/oss/au1550_ac97.c b/sound/oss/au1550_ac97.c
index c1168fae6be6..9011abe241ab 100644
--- a/sound/oss/au1550_ac97.c
+++ b/sound/oss/au1550_ac97.c
@@ -57,9 +57,9 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
-#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1xxx.h>
#undef OSS_DOCUMENTED_MIXER_SEMANTICS
diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c
index 53881bc91bba..994c71e986e4 100644
--- a/sound/oss/cs46xx.c
+++ b/sound/oss/cs46xx.c
@@ -147,7 +147,7 @@
* that should be printed on any released driver.
*/
#if CSDEBUG
-#define CS_DBGOUT(mask,level,x) if((cs_debuglevel >= (level)) && ((mask) & cs_debugmask)) {x;}
+#define CS_DBGOUT(mask,level,x) if ((cs_debuglevel >= (level)) && ((mask) & cs_debugmask)) {x;}
#else
#define CS_DBGOUT(mask,level,x)
#endif
@@ -175,19 +175,19 @@
#define CS_IOCTL_CMD_RESUME 0x2 // resume
#if CSDEBUG
-static unsigned long cs_debuglevel=1; /* levels range from 1-9 */
+static unsigned long cs_debuglevel = 1; /* levels range from 1-9 */
module_param(cs_debuglevel, ulong, 0644);
-static unsigned long cs_debugmask=CS_INIT | CS_ERROR; /* use CS_DBGOUT with various mask values */
+static unsigned long cs_debugmask = CS_INIT | CS_ERROR; /* use CS_DBGOUT with various mask values */
module_param(cs_debugmask, ulong, 0644);
#endif
static unsigned long hercules_egpio_disable; /* if non-zero set all EGPIO to 0 */
module_param(hercules_egpio_disable, ulong, 0);
-static unsigned long initdelay=700; /* PM delay in millisecs */
+static unsigned long initdelay = 700; /* PM delay in millisecs */
module_param(initdelay, ulong, 0);
-static unsigned long powerdown=-1; /* turn on/off powerdown processing in driver */
+static unsigned long powerdown = -1; /* turn on/off powerdown processing in driver */
module_param(powerdown, ulong, 0);
#define DMABUF_DEFAULTORDER 3
-static unsigned long defaultorder=DMABUF_DEFAULTORDER;
+static unsigned long defaultorder = DMABUF_DEFAULTORDER;
module_param(defaultorder, ulong, 0);
static int external_amp;
@@ -200,8 +200,8 @@ module_param(thinkpad, bool, 0);
* powerdown. also set thinkpad to 1 to disable powerdown,
* but also to enable the clkrun functionality.
*/
-static unsigned cs_powerdown=1;
-static unsigned cs_laptop_wait=1;
+static unsigned cs_powerdown = 1;
+static unsigned cs_laptop_wait = 1;
/* An instance of the 4610 channel */
struct cs_channel
@@ -319,7 +319,7 @@ struct cs_card {
atomic_t mixer_use_cnt;
/* PCI device stuff */
- struct pci_dev * pci_dev;
+ struct pci_dev *pci_dev;
struct list_head list;
unsigned int pctl, cctl; /* Hardware DMA flag sets */
@@ -384,7 +384,7 @@ struct cs_card {
static int cs_open_mixdev(struct inode *inode, struct file *file);
static int cs_release_mixdev(struct inode *inode, struct file *file);
static int cs_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg);
+ unsigned long arg);
static int cs_hardware_init(struct cs_card *card);
static int cs46xx_powerup(struct cs_card *card, unsigned int type);
static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspendflag);
@@ -423,8 +423,7 @@ static void printioctl(unsigned int x)
[SOUND_MIXER_VOLUME] = 9 /* Master Volume */
};
- switch(x)
- {
+ switch (x) {
case SOUND_MIXER_CS_GETDBGMASK:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CS_GETDBGMASK: ") );
break;
@@ -521,7 +520,6 @@ static void printioctl(unsigned int x)
case SOUND_PCM_READ_FILTER:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_FILTER: ") );
break;
-
case SOUND_MIXER_PRIVATE1:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE1: ") );
break;
@@ -543,10 +541,8 @@ static void printioctl(unsigned int x)
case SOUND_OLD_MIXER_INFO:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_OLD_MIXER_INFO: ") );
break;
-
default:
- switch (_IOC_NR(x))
- {
+ switch (_IOC_NR(x)) {
case SOUND_MIXER_VOLUME:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_VOLUME: ") );
break;
@@ -579,14 +575,11 @@ static void printioctl(unsigned int x)
break;
default:
i = _IOC_NR(x);
- if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
- {
+ if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i])) {
CS_DBGOUT(CS_IOCTL, 4, printk("UNKNOWN IOCTL: 0x%.8x NR=%d ",x,i) );
- }
- else
- {
+ } else {
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_IOCTL AC9x: 0x%.8x NR=%d ",
- x,i) );
+ x,i));
}
break;
}
@@ -601,22 +594,22 @@ static void printioctl(unsigned int x)
static void cs461x_poke(struct cs_card *codec, unsigned long reg, unsigned int val)
{
- writel(val, codec->ba1.idx[(reg >> 16) & 3]+(reg&0xffff));
+ writel(val, codec->ba1.idx[(reg >> 16) & 3] + (reg & 0xffff));
}
static unsigned int cs461x_peek(struct cs_card *codec, unsigned long reg)
{
- return readl(codec->ba1.idx[(reg >> 16) & 3]+(reg&0xffff));
+ return readl(codec->ba1.idx[(reg >> 16) & 3] + (reg & 0xffff));
}
static void cs461x_pokeBA0(struct cs_card *codec, unsigned long reg, unsigned int val)
{
- writel(val, codec->ba0+reg);
+ writel(val, codec->ba0 + reg);
}
static unsigned int cs461x_peekBA0(struct cs_card *codec, unsigned long reg)
{
- return readl(codec->ba0+reg);
+ return readl(codec->ba0 + reg);
}
@@ -625,26 +618,26 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 data);
static struct cs_channel *cs_alloc_pcm_channel(struct cs_card *card)
{
- if(card->channel[1].used==1)
+ if (card->channel[1].used == 1)
return NULL;
- card->channel[1].used=1;
- card->channel[1].num=1;
+ card->channel[1].used = 1;
+ card->channel[1].num = 1;
return &card->channel[1];
}
static struct cs_channel *cs_alloc_rec_pcm_channel(struct cs_card *card)
{
- if(card->channel[0].used==1)
+ if (card->channel[0].used == 1)
return NULL;
- card->channel[0].used=1;
- card->channel[0].num=0;
+ card->channel[0].used = 1;
+ card->channel[0].num = 0;
return &card->channel[0];
}
static void cs_free_pcm_channel(struct cs_card *card, int channel)
{
card->channel[channel].state = NULL;
- card->channel[channel].used=0;
+ card->channel[channel].used = 0;
}
/*
@@ -655,15 +648,15 @@ static void cs_free_pcm_channel(struct cs_card *card, int channel)
*/
static void cs_set_divisor(struct dmabuf *dmabuf)
{
- if(dmabuf->type == CS_TYPE_DAC)
+ if (dmabuf->type == CS_TYPE_DAC)
dmabuf->divisor = 1;
- else if( !(dmabuf->fmt & CS_FMT_STEREO) &&
+ else if (!(dmabuf->fmt & CS_FMT_STEREO) &&
(dmabuf->fmt & CS_FMT_16BIT))
dmabuf->divisor = 2;
- else if( (dmabuf->fmt & CS_FMT_STEREO) &&
+ else if ((dmabuf->fmt & CS_FMT_STEREO) &&
!(dmabuf->fmt & CS_FMT_16BIT))
dmabuf->divisor = 2;
- else if( !(dmabuf->fmt & CS_FMT_STEREO) &&
+ else if (!(dmabuf->fmt & CS_FMT_STEREO) &&
!(dmabuf->fmt & CS_FMT_16BIT))
dmabuf->divisor = 4;
else
@@ -680,13 +673,12 @@ static void cs_set_divisor(struct dmabuf *dmabuf)
*/
static void cs_mute(struct cs_card *card, int state)
{
- struct ac97_codec *dev=card->ac97_codec[0];
+ struct ac97_codec *dev = card->ac97_codec[0];
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: cs_mute()+ %s\n",
- (state == CS_TRUE) ? "Muting" : "UnMuting") );
+ (state == CS_TRUE) ? "Muting" : "UnMuting"));
- if(state == CS_TRUE)
- {
+ if (state == CS_TRUE) {
/*
* fix pops when powering up on thinkpads
*/
@@ -703,9 +695,7 @@ static void cs_mute(struct cs_card *card, int state)
cs_ac97_set(dev, (u8)BA0_AC97_HEADPHONE_VOLUME, 0x8000);
cs_ac97_set(dev, (u8)BA0_AC97_MASTER_VOLUME_MONO, 0x8000);
cs_ac97_set(dev, (u8)BA0_AC97_PCM_OUT_VOLUME, 0x8000);
- }
- else
- {
+ } else {
cs_ac97_set(dev, (u8)BA0_AC97_MASTER_VOLUME, card->pm.u32AC97_master_volume);
cs_ac97_set(dev, (u8)BA0_AC97_HEADPHONE_VOLUME, card->pm.u32AC97_headphone_volume);
cs_ac97_set(dev, (u8)BA0_AC97_MASTER_VOLUME_MONO, card->pm.u32AC97_master_volume_mono);
@@ -757,7 +747,6 @@ static unsigned int cs_set_dac_rate(struct cs_state * state, unsigned int rate)
/*
* Fill in the SampleRateConverter control block.
*/
-
spin_lock_irqsave(&state->card->lock, flags);
cs461x_poke(state->card, BA1_PSRC,
((correctionPerSec << 16) & 0xFFFF0000) | (correctionPerGOF & 0xFFFF));
@@ -770,7 +759,7 @@ static unsigned int cs_set_dac_rate(struct cs_state * state, unsigned int rate)
}
/* set recording sample rate */
-static unsigned int cs_set_adc_rate(struct cs_state * state, unsigned int rate)
+static unsigned int cs_set_adc_rate(struct cs_state *state, unsigned int rate)
{
struct dmabuf *dmabuf = &state->dmabuf;
struct cs_card *card = state->card;
@@ -815,7 +804,6 @@ static unsigned int cs_set_adc_rate(struct cs_state * state, unsigned int rate)
* dividend:remainder(ulOther / GOF_PER_SEC)
* initialDelay = dividend(((24 * Fs,in) + Fs,out - 1) / Fs,out)
*/
-
tmp1 = rate << 16;
coeffIncr = tmp1 / 48000;
tmp1 -= coeffIncr * 48000;
@@ -891,7 +879,7 @@ static void cs_play_setup(struct cs_state *state)
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_play_setup()+\n") );
cs461x_poke(card, BA1_PVOL, 0x80008000);
- if(!dmabuf->SGok)
+ if (!dmabuf->SGok)
cs461x_poke(card, BA1_PBA, virt_to_bus(dmabuf->pbuf));
Count = 4;
@@ -899,16 +887,14 @@ static void cs_play_setup(struct cs_state *state)
if ((dmabuf->fmt & CS_FMT_STEREO)) {
playFormat &= ~DMA_RQ_C2_AC_MONO_TO_STEREO;
Count *= 2;
- }
- else
+ } else
playFormat |= DMA_RQ_C2_AC_MONO_TO_STEREO;
if ((dmabuf->fmt & CS_FMT_16BIT)) {
playFormat &= ~(DMA_RQ_C2_AC_8_TO_16_BIT
| DMA_RQ_C2_AC_SIGNED_CONVERT);
Count *= 2;
- }
- else
+ } else
playFormat |= (DMA_RQ_C2_AC_8_TO_16_BIT
| DMA_RQ_C2_AC_SIGNED_CONVERT);
@@ -919,7 +905,6 @@ static void cs_play_setup(struct cs_state *state)
cs461x_poke(card, BA1_PDTC, tmp | --Count);
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_play_setup()-\n") );
-
}
static struct InitStruct
@@ -944,8 +929,7 @@ static void SetCaptureSPValues(struct cs_card *card)
{
unsigned i, offset;
CS_DBGOUT(CS_FUNCTION, 8, printk("cs46xx: SetCaptureSPValues()+\n") );
- for(i=0; i<sizeof(InitArray)/sizeof(struct InitStruct); i++)
- {
+ for (i = 0; i < sizeof(InitArray) / sizeof(struct InitStruct); i++) {
offset = InitArray[i].off*4; /* 8bit to 32bit offset value */
cs461x_poke(card, offset, InitArray[i].val );
}
@@ -957,8 +941,8 @@ static void cs_rec_setup(struct cs_state *state)
{
struct cs_card *card = state->card;
struct dmabuf *dmabuf = &state->dmabuf;
- CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_rec_setup()+\n") );
+ CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_rec_setup()+\n"));
SetCaptureSPValues(card);
/*
@@ -994,14 +978,11 @@ static inline unsigned cs_get_dma_addr(struct cs_state *state)
/*
* granularity is byte boundary, good part.
*/
- if(dmabuf->enable & DAC_RUNNING)
- {
+ if (dmabuf->enable & DAC_RUNNING)
offset = cs461x_peek(state->card, BA1_PBA);
- }
else /* ADC_RUNNING must be set */
- {
offset = cs461x_peek(state->card, BA1_CBA);
- }
+
CS_DBGOUT(CS_PARMS | CS_FUNCTION, 9,
printk("cs46xx: cs_get_dma_addr() %d\n",offset) );
offset = (u32)bus_to_virt((unsigned long)offset) - (u32)dmabuf->rawbuf;
@@ -1015,8 +996,7 @@ static void resync_dma_ptrs(struct cs_state *state)
struct dmabuf *dmabuf;
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: resync_dma_ptrs()+ \n") );
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
dmabuf->hwptr=dmabuf->swptr = 0;
dmabuf->pringbuf = 0;
@@ -1149,13 +1129,13 @@ static int alloc_dmabuf(struct cs_state *state)
/*
* check for order within limits, but do not overwrite value.
*/
- if((defaultorder > 1) && (defaultorder < 12))
+ if ((defaultorder > 1) && (defaultorder < 12))
df = defaultorder;
else
df = 2;
for (order = df; order >= DMABUF_MINORDER; order--)
- if ( (rawbuf = (void *) pci_alloc_consistent(
+ if ((rawbuf = (void *)pci_alloc_consistent(
card->pci_dev, PAGE_SIZE << order, &dmabuf->dmaaddr)))
break;
if (!rawbuf) {
@@ -1181,8 +1161,7 @@ static int alloc_dmabuf(struct cs_state *state)
/*
* only allocate the conversion buffer for the ADC
*/
- if(dmabuf->type == CS_TYPE_DAC)
- {
+ if (dmabuf->type == CS_TYPE_DAC) {
dmabuf->tmpbuff = NULL;
dmabuf->buforder_tmpbuff = 0;
return 0;
@@ -1258,8 +1237,7 @@ static int __prog_dmabuf(struct cs_state *state)
/*
* check for CAPTURE and use only non-sg for initial release
*/
- if(dmabuf->type == CS_TYPE_ADC)
- {
+ if (dmabuf->type == CS_TYPE_ADC) {
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf() ADC\n"));
/*
* add in non-sg support for capture.
@@ -1313,9 +1291,7 @@ static int __prog_dmabuf(struct cs_state *state)
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- 0 \n"));
return 0;
- }
- else if (dmabuf->type == CS_TYPE_DAC)
- {
+ } else if (dmabuf->type == CS_TYPE_DAC) {
/*
* Must be DAC
*/
@@ -1337,8 +1313,7 @@ static int __prog_dmabuf(struct cs_state *state)
allocated_pages = 1 << dmabuf->buforder;
allocated_bytes = allocated_pages*PAGE_SIZE;
- if(allocated_pages < 2)
- {
+ if (allocated_pages < 2) {
CS_DBGOUT(CS_FUNCTION, 4, printk(
"cs46xx: prog_dmabuf() Error: allocated_pages too small (%d)\n",
(unsigned)allocated_pages));
@@ -1353,14 +1328,14 @@ static int __prog_dmabuf(struct cs_state *state)
/* Set up S/G variables. */
*ptmp = virt_to_bus(dmabuf->rawbuf);
- *(ptmp+1) = 0x00000008;
- for(tmp1= 1; tmp1 < nSGpages; tmp1++) {
- *(ptmp+2*tmp1) = virt_to_bus( (dmabuf->rawbuf)+4096*tmp1);
- if( tmp1 == nSGpages-1)
+ *(ptmp + 1) = 0x00000008;
+ for (tmp1 = 1; tmp1 < nSGpages; tmp1++) {
+ *(ptmp + 2 * tmp1) = virt_to_bus((dmabuf->rawbuf) + 4096 * tmp1);
+ if (tmp1 == nSGpages - 1)
tmp2 = 0xbfff0000;
else
- tmp2 = 0x80000000+8*(tmp1+1);
- *(ptmp+2*tmp1+1) = tmp2;
+ tmp2 = 0x80000000 + 8 * (tmp1 + 1);
+ *(ptmp + 2 * tmp1 + 1) = tmp2;
}
SGarray[0] = 0x82c0200d;
SGarray[1] = 0xffff0000;
@@ -1368,18 +1343,17 @@ static int __prog_dmabuf(struct cs_state *state)
SGarray[3] = 0x00010600;
SGarray[4] = *(ptmp+2);
SGarray[5] = 0x80000010;
- SGarray[6] = *ptmp;
- SGarray[7] = *(ptmp+2);
- SGarray[8] = (virt_to_bus(dmabuf->pbuf) & 0xffff000) | 0x10;
-
- if (dmabuf->SGok) {
- dmabuf->numfrag = nSGpages;
- dmabuf->fragsize = 4096;
- dmabuf->fragsamples = 4096 >> sample_shift[dmabuf->fmt];
- dmabuf->fragshift = 12;
- dmabuf->dmasize = dmabuf->numfrag*4096;
- }
- else {
+ SGarray[6] = *ptmp;
+ SGarray[7] = *(ptmp+2);
+ SGarray[8] = (virt_to_bus(dmabuf->pbuf) & 0xffff000) | 0x10;
+
+ if (dmabuf->SGok) {
+ dmabuf->numfrag = nSGpages;
+ dmabuf->fragsize = 4096;
+ dmabuf->fragsamples = 4096 >> sample_shift[dmabuf->fmt];
+ dmabuf->fragshift = 12;
+ dmabuf->dmasize = dmabuf->numfrag * 4096;
+ } else {
SGarray[0] = 0xf2c0000f;
SGarray[1] = 0x00000200;
SGarray[2] = 0;
@@ -1391,8 +1365,8 @@ static int __prog_dmabuf(struct cs_state *state)
dmabuf->dmasize = 4096;
dmabuf->fragshift = 11;
}
- for(tmp1 = 0; tmp1 < sizeof(SGarray)/4; tmp1++)
- cs461x_poke( state->card, BA1_PDTC+tmp1*4, SGarray[tmp1]);
+ for (tmp1 = 0; tmp1 < sizeof(SGarray) / 4; tmp1++)
+ cs461x_poke(state->card, BA1_PDTC+tmp1 * 4, SGarray[tmp1]);
memset(dmabuf->rawbuf, (dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
dmabuf->dmasize);
@@ -1416,9 +1390,7 @@ static int __prog_dmabuf(struct cs_state *state)
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- \n"));
return 0;
- }
- else
- {
+ } else {
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- Invalid Type %d\n",
dmabuf->type));
}
@@ -1489,8 +1461,7 @@ static int drain_dac(struct cs_state *state, int nonblock)
}
remove_wait_queue(&dmabuf->wait, &wait);
current->state = TASK_RUNNING;
- if (signal_pending(current))
- {
+ if (signal_pending(current)) {
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: drain_dac()- -ERESTARTSYS\n"));
/*
* set to silence and let that clear the fifos.
@@ -1514,8 +1485,7 @@ static void cs_update_ptr(struct cs_card *card, int wake)
/* error handling and process wake up for ADC */
state = card->states[0];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->enable & ADC_RUNNING) {
/* update hardware pointer */
@@ -1531,12 +1501,10 @@ static void cs_update_ptr(struct cs_card *card, int wake)
if (dmabuf->count > dmabuf->dmasize)
dmabuf->count = dmabuf->dmasize;
- if(dmabuf->mapped)
- {
+ if (dmabuf->mapped) {
if (wake && dmabuf->count >= (signed)dmabuf->fragsize)
wake_up(&dmabuf->wait);
- } else
- {
+ } else {
if (wake && dmabuf->count > 0)
wake_up(&dmabuf->wait);
}
@@ -1547,8 +1515,7 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* Now the DAC
*/
state = card->states[1];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
/* error handling and process wake up for DAC */
if (dmabuf->enable & DAC_RUNNING) {
@@ -1570,7 +1537,7 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* in that, since dmasize is the buffer asked for
* via mmap.
*/
- if( dmabuf->count > dmabuf->dmasize)
+ if (dmabuf->count > dmabuf->dmasize)
dmabuf->count &= dmabuf->dmasize-1;
} else {
dmabuf->count -= diff;
@@ -1578,13 +1545,10 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* backfill with silence and clear out the last
* "diff" number of bytes.
*/
- if(hwptr >= diff)
- {
+ if (hwptr >= diff) {
memset(dmabuf->rawbuf + hwptr - diff,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80, diff);
- }
- else
- {
+ } else {
memset(dmabuf->rawbuf,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
(unsigned)hwptr);
@@ -1602,12 +1566,12 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* buffer underrun or buffer overrun, reset the
* count of bytes written back to 0.
*/
- if(dmabuf->count < 0)
- dmabuf->underrun=1;
+ if (dmabuf->count < 0)
+ dmabuf->underrun = 1;
dmabuf->count = 0;
dmabuf->error++;
}
- if (wake && dmabuf->count < (signed)dmabuf->dmasize/2)
+ if (wake && dmabuf->count < (signed)dmabuf->dmasize / 2)
wake_up(&dmabuf->wait);
}
}
@@ -1661,8 +1625,7 @@ static irqreturn_t cs_interrupt(int irq, void *dev_id, struct pt_regs *regs)
status = cs461x_peekBA0(card, BA0_HISR);
- if ((status & 0x7fffffff) == 0)
- {
+ if ((status & 0x7fffffff) == 0) {
cs461x_pokeBA0(card, BA0_HICR, HICR_CHGM|HICR_IEV);
spin_unlock(&card->lock);
return IRQ_HANDLED; /* Might be IRQ_NONE.. */
@@ -1671,15 +1634,14 @@ static irqreturn_t cs_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/*
* check for playback or capture interrupt only
*/
- if( ((status & HISR_VC0) && playstate && playstate->dmabuf.ready) ||
- (((status & HISR_VC1) && recstate && recstate->dmabuf.ready)) )
- {
+ if (((status & HISR_VC0) && playstate && playstate->dmabuf.ready) ||
+ (((status & HISR_VC1) && recstate && recstate->dmabuf.ready))) {
CS_DBGOUT(CS_INTERRUPT, 8, printk(
"cs46xx: cs_interrupt() interrupt bit(s) set (0x%x)\n",status));
cs_update_ptr(card, CS_TRUE);
}
- if( status & HISR_MIDI )
+ if (status & HISR_MIDI)
cs_handle_midi(card);
/* clear 'em */
@@ -1694,7 +1656,7 @@ static irqreturn_t cs_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static ssize_t cs_midi_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
ssize_t ret;
unsigned long flags;
unsigned ptr;
@@ -1737,7 +1699,7 @@ static ssize_t cs_midi_read(struct file *file, char __user *buffer, size_t count
static ssize_t cs_midi_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
ssize_t ret;
unsigned long flags;
unsigned ptr;
@@ -1785,7 +1747,7 @@ static ssize_t cs_midi_write(struct file *file, const char __user *buffer, size_
static unsigned int cs_midi_poll(struct file *file, struct poll_table_struct *wait)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
unsigned long flags;
unsigned int mask = 0;
@@ -1810,12 +1772,11 @@ static unsigned int cs_midi_poll(struct file *file, struct poll_table_struct *wa
static int cs_midi_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
- struct cs_card *card=NULL;
+ struct cs_card *card = NULL;
unsigned long flags;
struct list_head *entry;
- list_for_each(entry, &cs46xx_devs)
- {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
if (card->dev_midi == minor)
break;
@@ -1823,8 +1784,7 @@ static int cs_midi_open(struct inode *inode, struct file *file)
if (entry == &cs46xx_devs)
return -ENODEV;
- if (!card)
- {
+ if (!card) {
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
"cs46xx: cs46xx_midi_open(): Error - unable to find card struct\n"));
return -ENODEV;
@@ -1852,12 +1812,10 @@ static int cs_midi_open(struct inode *inode, struct file *file)
cs461x_pokeBA0(card, BA0_MIDCR, 0x0000000f); /* Enable xmit, rcv. */
cs461x_pokeBA0(card, BA0_HICR, HICR_IEV | HICR_CHGM); /* Enable interrupts */
}
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ)
card->midi.ird = card->midi.iwr = card->midi.icnt = 0;
- }
- if (file->f_mode & FMODE_WRITE) {
+ if (file->f_mode & FMODE_WRITE)
card->midi.ord = card->midi.owr = card->midi.ocnt = 0;
- }
spin_unlock_irqrestore(&card->midi.lock, flags);
card->midi.open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE));
mutex_unlock(&card->midi.open_mutex);
@@ -1867,7 +1825,7 @@ static int cs_midi_open(struct inode *inode, struct file *file)
static int cs_midi_release(struct inode *inode, struct file *file)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
unsigned count, tmo;
@@ -1933,11 +1891,10 @@ static /*const*/ struct file_operations cs_midi_fops = {
static void CopySamples(char *dst, char *src, int count, unsigned fmt,
struct dmabuf *dmabuf)
{
-
s32 s32AudioSample;
- s16 *psSrc=(s16 *)src;
- s16 *psDst=(s16 *)dst;
- u8 *pucDst=(u8 *)dst;
+ s16 *psSrc = (s16 *)src;
+ s16 *psDst = (s16 *)dst;
+ u8 *pucDst = (u8 *)dst;
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: CopySamples()+ ") );
CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
@@ -1947,34 +1904,29 @@ static void CopySamples(char *dst, char *src, int count, unsigned fmt,
/*
* See if the data should be output as 8-bit unsigned stereo.
*/
- if((fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT))
- {
+ if ((fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT)) {
/*
* Convert each 16-bit signed stereo sample to 8-bit unsigned
* stereo using rounding.
*/
psSrc = (s16 *)src;
- count = count/2;
- while(count--)
- {
+ count = count / 2;
+ while (count--)
*(pucDst++) = (u8)(((s16)(*psSrc++) + (s16)0x8000) >> 8);
- }
}
/*
* See if the data should be output at 8-bit unsigned mono.
*/
- else if(!(fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT))
- {
+ else if (!(fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT)) {
/*
* Convert each 16-bit signed stereo sample to 8-bit unsigned
* mono using averaging and rounding.
*/
psSrc = (s16 *)src;
- count = count/2;
- while(count--)
- {
- s32AudioSample = ((*psSrc)+(*(psSrc + 1)))/2 + (s32)0x80;
- if(s32AudioSample > 0x7fff)
+ count = count / 2;
+ while (count--) {
+ s32AudioSample = ((*psSrc) + (*(psSrc + 1))) / 2 + (s32)0x80;
+ if (s32AudioSample > 0x7fff)
s32AudioSample = 0x7fff;
*(pucDst++) = (u8)(((s16)s32AudioSample + (s16)0x8000) >> 8);
psSrc += 2;
@@ -1983,17 +1935,15 @@ static void CopySamples(char *dst, char *src, int count, unsigned fmt,
/*
* See if the data should be output at 16-bit signed mono.
*/
- else if(!(fmt & CS_FMT_STEREO) && (fmt & CS_FMT_16BIT))
- {
+ else if (!(fmt & CS_FMT_STEREO) && (fmt & CS_FMT_16BIT)) {
/*
* Convert each 16-bit signed stereo sample to 16-bit signed
* mono using averaging.
*/
psSrc = (s16 *)src;
- count = count/2;
- while(count--)
- {
- *(psDst++) = (s16)((*psSrc)+(*(psSrc + 1)))/2;
+ count = count / 2;
+ while (count--) {
+ *(psDst++) = (s16)((*psSrc) + (*(psSrc + 1))) / 2;
psSrc += 2;
}
}
@@ -2020,20 +1970,15 @@ static unsigned cs_copy_to_user(
"cs_copy_to_user()+ fmt=0x%x cnt=%d dest=%p\n",
dmabuf->fmt,(unsigned)cnt,dest) );
- if(cnt > dmabuf->dmasize)
- {
+ if (cnt > dmabuf->dmasize)
cnt = dmabuf->dmasize;
- }
- if(!cnt)
- {
+ if (!cnt) {
*copied = 0;
return 0;
}
- if(dmabuf->divisor != 1)
- {
- if(!dmabuf->tmpbuff)
- {
- *copied = cnt/dmabuf->divisor;
+ if (dmabuf->divisor != 1) {
+ if (!dmabuf->tmpbuff) {
+ *copied = cnt / dmabuf->divisor;
return 0;
}
@@ -2042,17 +1987,16 @@ static unsigned cs_copy_to_user(
src = dmabuf->tmpbuff;
cnt = cnt/dmabuf->divisor;
}
- if (copy_to_user(dest, src, cnt))
- {
+ if (copy_to_user(dest, src, cnt)) {
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_ERR
"cs46xx: cs_copy_to_user()- fault dest=%p src=%p cnt=%d\n",
- dest,src,cnt) );
+ dest,src,cnt));
*copied = 0;
return -EFAULT;
}
*copied = cnt;
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO
- "cs46xx: cs_copy_to_user()- copied bytes is %d \n",cnt) );
+ "cs46xx: cs_copy_to_user()- copied bytes is %d \n",cnt));
return 0;
}
@@ -2060,7 +2004,7 @@ static unsigned cs_copy_to_user(
the user's buffer. it is filled by the dma machine and drained by this loop. */
static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *) file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
DECLARE_WAITQUEUE(wait, current);
struct dmabuf *dmabuf;
@@ -2068,12 +2012,12 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
unsigned long flags;
unsigned swptr;
int cnt;
- unsigned copied=0;
+ unsigned copied = 0;
CS_DBGOUT(CS_WAVE_READ | CS_FUNCTION, 4,
printk("cs46xx: cs_read()+ %zd\n",count) );
- state = (struct cs_state *)card->states[0];
- if(!state)
+ state = card->states[0];
+ if (!state)
return -ENODEV;
dmabuf = &state->dmabuf;
@@ -2088,11 +2032,11 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
add_wait_queue(&state->dmabuf.wait, &wait);
while (count > 0) {
- while(!(card->pm.flags & CS46XX_PM_IDLE))
- {
+ while (!(card->pm.flags & CS46XX_PM_IDLE)) {
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
}
@@ -2112,19 +2056,20 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
recorded */
start_adc(state);
if (file->f_flags & O_NONBLOCK) {
- if (!ret) ret = -EAGAIN;
+ if (!ret)
+ ret = -EAGAIN;
goto out;
}
mutex_unlock(&state->sem);
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
mutex_lock(&state->sem);
- if (dmabuf->mapped)
- {
- if(!ret)
+ if (dmabuf->mapped) {
+ if (!ret)
ret = -ENXIO;
goto out;
}
@@ -2135,12 +2080,12 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
"_read() copy_to cnt=%d count=%zd ", cnt,count) );
CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
" .dmasize=%d .count=%d buffer=%p ret=%zd\n",
- dmabuf->dmasize,dmabuf->count,buffer,ret) );
+ dmabuf->dmasize,dmabuf->count,buffer,ret));
if (cs_copy_to_user(state, buffer,
- (char *)dmabuf->rawbuf + swptr, cnt, &copied))
- {
- if (!ret) ret = -EFAULT;
+ (char *)dmabuf->rawbuf + swptr, cnt, &copied)) {
+ if (!ret)
+ ret = -EFAULT;
goto out;
}
swptr = (swptr + cnt) % dmabuf->dmasize;
@@ -2167,7 +2112,7 @@ out2:
the soundcard. it is drained by the dma machine and filled by this loop. */
static ssize_t cs_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *) file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
DECLARE_WAITQUEUE(wait, current);
struct dmabuf *dmabuf;
@@ -2178,16 +2123,15 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
CS_DBGOUT(CS_WAVE_WRITE | CS_FUNCTION, 4,
printk("cs46xx: cs_write called, count = %zd\n", count) );
- state = (struct cs_state *)card->states[1];
- if(!state)
+ state = card->states[1];
+ if (!state)
return -ENODEV;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
dmabuf = &state->dmabuf;
mutex_lock(&state->sem);
- if (dmabuf->mapped)
- {
+ if (dmabuf->mapped) {
ret = -ENXIO;
goto out;
}
@@ -2201,11 +2145,11 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
* check for PM events and underrun/overrun in the loop.
*/
while (count > 0) {
- while(!(card->pm.flags & CS46XX_PM_IDLE))
- {
+ while (!(card->pm.flags & CS46XX_PM_IDLE)) {
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
}
@@ -2216,8 +2160,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
dmabuf->count = 0;
dmabuf->swptr = dmabuf->hwptr;
}
- if (dmabuf->underrun)
- {
+ if (dmabuf->underrun) {
dmabuf->underrun = 0;
dmabuf->hwptr = cs_get_dma_addr(state);
dmabuf->swptr = dmabuf->hwptr;
@@ -2238,34 +2181,35 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
played */
start_dac(state);
if (file->f_flags & O_NONBLOCK) {
- if (!ret) ret = -EAGAIN;
+ if (!ret)
+ ret = -EAGAIN;
goto out;
}
mutex_unlock(&state->sem);
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
mutex_lock(&state->sem);
- if (dmabuf->mapped)
- {
- if(!ret)
+ if (dmabuf->mapped) {
+ if (!ret)
ret = -ENXIO;
goto out;
}
continue;
}
if (copy_from_user(dmabuf->rawbuf + swptr, buffer, cnt)) {
- if (!ret) ret = -EFAULT;
+ if (!ret)
+ ret = -EFAULT;
goto out;
}
spin_lock_irqsave(&state->card->lock, flags);
swptr = (swptr + cnt) % dmabuf->dmasize;
dmabuf->swptr = swptr;
dmabuf->count += cnt;
- if(dmabuf->count > dmabuf->dmasize)
- {
+ if (dmabuf->count > dmabuf->dmasize) {
CS_DBGOUT(CS_WAVE_WRITE | CS_ERROR, 2, printk(
"cs46xx: cs_write() d->count > dmasize - resetting\n"));
dmabuf->count = dmabuf->dmasize;
@@ -2284,38 +2228,32 @@ out:
set_current_state(TASK_RUNNING);
CS_DBGOUT(CS_WAVE_WRITE | CS_FUNCTION, 2,
- printk("cs46xx: cs_write()- ret=%zd\n", ret) );
+ printk("cs46xx: cs_write()- ret=%zd\n", ret));
return ret;
}
static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct dmabuf *dmabuf;
struct cs_state *state;
-
unsigned long flags;
unsigned int mask = 0;
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_poll()+ \n"));
- if (!(file->f_mode & (FMODE_WRITE | FMODE_READ)))
- {
+ if (!(file->f_mode & (FMODE_WRITE | FMODE_READ))) {
return -EINVAL;
}
- if (file->f_mode & FMODE_WRITE)
- {
+ if (file->f_mode & FMODE_WRITE) {
state = card->states[1];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
poll_wait(file, &dmabuf->wait, wait);
}
}
- if (file->f_mode & FMODE_READ)
- {
+ if (file->f_mode & FMODE_READ) {
state = card->states[0];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
poll_wait(file, &dmabuf->wait, wait);
}
@@ -2325,8 +2263,7 @@ static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
cs_update_ptr(card, CS_FALSE);
if (file->f_mode & FMODE_READ) {
state = card->states[0];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->count >= (signed)dmabuf->fragsize)
mask |= POLLIN | POLLRDNORM;
@@ -2334,8 +2271,7 @@ static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
}
if (file->f_mode & FMODE_WRITE) {
state = card->states[1];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->mapped) {
if (dmabuf->count >= (signed)dmabuf->fragsize)
@@ -2364,7 +2300,7 @@ static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
static int cs_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
struct dmabuf *dmabuf;
int ret = 0;
@@ -2376,8 +2312,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_flags & VM_WRITE) {
state = card->states[1];
- if(state)
- {
+ if (state) {
CS_DBGOUT(CS_OPEN, 2, printk(
"cs46xx: cs_mmap() VM_WRITE - state TRUE prog_dmabuf DAC\n") );
if ((ret = prog_dmabuf(state)) != 0)
@@ -2385,8 +2320,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
}
} else if (vma->vm_flags & VM_READ) {
state = card->states[0];
- if(state)
- {
+ if (state) {
CS_DBGOUT(CS_OPEN, 2, printk(
"cs46xx: cs_mmap() VM_READ - state TRUE prog_dmabuf ADC\n") );
if ((ret = prog_dmabuf(state)) != 0)
@@ -2414,8 +2348,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&state->sem);
dmabuf = &state->dmabuf;
- if (cs4x_pgoff(vma) != 0)
- {
+ if (cs4x_pgoff(vma) != 0) {
ret = -EINVAL;
goto out;
}
@@ -2423,15 +2356,13 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
CS_DBGOUT(CS_PARMS, 2, printk("cs46xx: cs_mmap(): size=%d\n",(unsigned)size) );
- if (size > (PAGE_SIZE << dmabuf->buforder))
- {
+ if (size > (PAGE_SIZE << dmabuf->buforder)) {
ret = -EINVAL;
goto out;
}
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(dmabuf->rawbuf) >> PAGE_SHIFT,
- size, vma->vm_page_prot))
- {
+ size, vma->vm_page_prot)) {
ret = -EAGAIN;
goto out;
}
@@ -2445,25 +2376,24 @@ out:
static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
- struct dmabuf *dmabuf=NULL;
+ struct dmabuf *dmabuf = NULL;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
- int val, valsave, mapped, ret;
+ int val, valsave, ret;
+ int mapped = 0;
void __user *argp = (void __user *)arg;
int __user *p = argp;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
mapped = (file->f_mode & FMODE_READ) && dmabuf->mapped;
}
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
mapped |= (file->f_mode & FMODE_WRITE) && dmabuf->mapped;
}
@@ -2472,17 +2402,14 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
printioctl(cmd);
#endif
- switch (cmd)
- {
+ switch (cmd) {
case OSS_GETVERSION:
return put_user(SOUND_VERSION, p);
-
case SNDCTL_DSP_RESET:
/* FIXME: spin_lock ? */
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
synchronize_irq(card->irq);
@@ -2495,9 +2422,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
synchronize_irq(card->irq);
@@ -2511,20 +2437,17 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
CS_DBGOUT(CS_IOCTL, 2, printk("cs46xx: DSP_RESET()-\n") );
return 0;
-
case SNDCTL_DSP_SYNC:
if (file->f_mode & FMODE_WRITE)
return drain_dac(state, file->f_flags & O_NONBLOCK);
return 0;
-
case SNDCTL_DSP_SPEED: /* set sample rate */
if (get_user(val, p))
return -EFAULT;
if (val >= 0) {
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
@@ -2534,9 +2457,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
@@ -2553,19 +2475,17 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return put_user(dmabuf->rate, p);
}
return put_user(0, p);
-
case SNDCTL_DSP_STEREO: /* set stereo or mono channel */
if (get_user(val, p))
return -EFAULT;
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val)
+ if (val)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2577,14 +2497,13 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val)
+ if (val)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2596,12 +2515,10 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return 0;
-
case SNDCTL_DSP_GETBLKSIZE:
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
if ((val = prog_dmabuf(state)))
return val;
@@ -2609,9 +2526,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
if ((val = prog_dmabuf(state)))
return val;
@@ -2620,10 +2536,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return put_user(0, p);
-
case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format*/
return put_user(AFMT_S16_LE | AFMT_U8, p);
-
case SNDCTL_DSP_SETFMT: /* Select sample format */
if (get_user(val, p))
return -EFAULT;
@@ -2635,88 +2549,75 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
val == AFMT_U8 ? "8Bit Unsigned" : "") );
valsave = val;
if (val != AFMT_QUERY) {
- if(val==AFMT_S16_LE || val==AFMT_U8)
- {
+ if (val == AFMT_S16_LE || val == AFMT_U8) {
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val==AFMT_S16_LE)
+ if (val == AFMT_S16_LE)
dmabuf->fmt |= CS_FMT_16BIT;
else
dmabuf->fmt &= ~CS_FMT_16BIT;
cs_set_divisor(dmabuf);
- if((ret = prog_dmabuf(state)))
+ if ((ret = prog_dmabuf(state)))
return ret;
}
}
if (file->f_mode & FMODE_READ) {
val = valsave;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val==AFMT_S16_LE)
+ if (val == AFMT_S16_LE)
dmabuf->fmt |= CS_FMT_16BIT;
else
dmabuf->fmt &= ~CS_FMT_16BIT;
cs_set_divisor(dmabuf);
- if((ret = prog_dmabuf(state)))
+ if ((ret = prog_dmabuf(state)))
return ret;
}
}
- }
- else
- {
+ } else {
CS_DBGOUT(CS_IOCTL | CS_ERROR, 2, printk(
"cs46xx: DSP_SETFMT() Unsupported format (0x%x)\n",
valsave) );
}
- }
- else
- {
- if(file->f_mode & FMODE_WRITE)
- {
- state = (struct cs_state *)card->states[1];
- if(state)
+ } else {
+ if (file->f_mode & FMODE_WRITE) {
+ state = card->states[1];
+ if (state)
dmabuf = &state->dmabuf;
- }
- else if(file->f_mode & FMODE_READ)
- {
- state = (struct cs_state *)card->states[0];
- if(state)
+ } else if (file->f_mode & FMODE_READ) {
+ state = card->states[0];
+ if (state)
dmabuf = &state->dmabuf;
}
}
- if(dmabuf)
- {
- if(dmabuf->fmt & CS_FMT_16BIT)
+ if (dmabuf) {
+ if (dmabuf->fmt & CS_FMT_16BIT)
return put_user(AFMT_S16_LE, p);
else
return put_user(AFMT_U8, p);
}
return put_user(0, p);
-
case SNDCTL_DSP_CHANNELS:
if (get_user(val, p))
return -EFAULT;
if (val != 0) {
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val>1)
+ if (val > 1)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2726,14 +2627,13 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val>1)
+ if (val > 1)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2745,19 +2645,16 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
return put_user((dmabuf->fmt & CS_FMT_STEREO) ? 2 : 1,
p);
-
case SNDCTL_DSP_POST:
/*
* There will be a longer than normal pause in the data.
* so... do nothing, because there is nothing that we can do.
*/
return 0;
-
case SNDCTL_DSP_SUBDIVIDE:
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->subdivision)
return -EINVAL;
@@ -2769,9 +2666,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->subdivision)
return -EINVAL;
@@ -2783,37 +2679,31 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return 0;
-
case SNDCTL_DSP_SETFRAGMENT:
if (get_user(val, p))
return -EFAULT;
-
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
dmabuf->ossfragshift = val & 0xffff;
dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
dmabuf->ossfragshift = val & 0xffff;
dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
}
}
return 0;
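The two assignments above decode the OSS SNDCTL_DSP_SETFRAGMENT argument: the low 16 bits carry log2 of the requested fragment size and the high 16 bits the maximum fragment count. A minimal user-space sketch of how such a value is packed (illustration only; the numbers are assumed):

#include <stdio.h>

int main(void)
{
	unsigned int fragshift = 12;		/* 2^12 = 4096-byte fragments */
	unsigned int maxfrags = 4;		/* allow at most 4 fragments */
	unsigned int val = (maxfrags << 16) | fragshift;

	/* mirrors the driver's decode: prints "fragshift=12 maxfrags=4" */
	printf("fragshift=%u maxfrags=%u\n",
	       val & 0xffff, (val >> 16) & 0xffff);
	return 0;
}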
-
case SNDCTL_DSP_GETOSPACE:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
@@ -2832,13 +2722,11 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
}
return -ENODEV;
-
case SNDCTL_DSP_GETISPACE:
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
@@ -2850,48 +2738,39 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
}
return -ENODEV;
-
case SNDCTL_DSP_NONBLOCK:
file->f_flags |= O_NONBLOCK;
return 0;
-
case SNDCTL_DSP_GETCAPS:
return put_user(DSP_CAP_REALTIME|DSP_CAP_TRIGGER|DSP_CAP_MMAP,
p);
-
case SNDCTL_DSP_GETTRIGGER:
val = 0;
CS_DBGOUT(CS_IOCTL, 2, printk("cs46xx: DSP_GETTRIGGER()+\n") );
- if (file->f_mode & FMODE_WRITE)
- {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ if (file->f_mode & FMODE_WRITE) {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
- if(dmabuf->enable & DAC_RUNNING)
+ if (dmabuf->enable & DAC_RUNNING)
val |= PCM_ENABLE_INPUT;
}
}
- if (file->f_mode & FMODE_READ)
- {
- if(state)
- {
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ) {
+ if (state) {
+ state = card->states[0];
dmabuf = &state->dmabuf;
- if(dmabuf->enable & ADC_RUNNING)
+ if (dmabuf->enable & ADC_RUNNING)
val |= PCM_ENABLE_OUTPUT;
}
}
CS_DBGOUT(CS_IOCTL, 2, printk("cs46xx: DSP_GETTRIGGER()- val=0x%x\n",val) );
return put_user(val, p);
-
case SNDCTL_DSP_SETTRIGGER:
if (get_user(val, p))
return -EFAULT;
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
if (val & PCM_ENABLE_INPUT) {
if (!dmabuf->ready && (ret = prog_dmabuf(state)))
@@ -2902,9 +2781,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
if (val & PCM_ENABLE_OUTPUT) {
if (!dmabuf->ready && (ret = prog_dmabuf(state)))
@@ -2915,13 +2793,11 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return 0;
-
case SNDCTL_DSP_GETIPTR:
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
@@ -2934,28 +2810,23 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return 0;
}
return -ENODEV;
-
case SNDCTL_DSP_GETOPTR:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
cinfo.bytes = dmabuf->total_bytes;
- if (dmabuf->mapped)
- {
+ if (dmabuf->mapped) {
cinfo.blocks = (cinfo.bytes >> dmabuf->fragshift)
- dmabuf->blocks;
CS_DBGOUT(CS_PARMS, 8,
printk("total_bytes=%d blocks=%d dmabuf->blocks=%d\n",
cinfo.bytes,cinfo.blocks,dmabuf->blocks) );
dmabuf->blocks = cinfo.bytes >> dmabuf->fragshift;
- }
- else
- {
+ } else {
cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
}
cinfo.ptr = dmabuf->hwptr;
@@ -2969,66 +2840,54 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return 0;
}
return -ENODEV;
-
case SNDCTL_DSP_SETDUPLEX:
return 0;
-
case SNDCTL_DSP_GETODELAY:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
val = dmabuf->count;
spin_unlock_irqrestore(&state->card->lock, flags);
- }
- else
+ } else
val = 0;
return put_user(val, p);
-
case SOUND_PCM_READ_RATE:
- if(file->f_mode & FMODE_READ)
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ)
+ state = card->states[0];
else
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
return put_user(dmabuf->rate, p);
}
return put_user(0, p);
-
-
case SOUND_PCM_READ_CHANNELS:
- if(file->f_mode & FMODE_READ)
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ)
+ state = card->states[0];
else
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
return put_user((dmabuf->fmt & CS_FMT_STEREO) ? 2 : 1,
p);
}
return put_user(0, p);
-
case SOUND_PCM_READ_BITS:
- if(file->f_mode & FMODE_READ)
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ)
+ state = card->states[0];
else
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
return put_user((dmabuf->fmt & CS_FMT_16BIT) ?
AFMT_S16_LE : AFMT_U8, p);
}
return put_user(0, p);
-
case SNDCTL_DSP_MAPINBUF:
case SNDCTL_DSP_MAPOUTBUF:
case SNDCTL_DSP_SETSYNCRO:
@@ -3057,18 +2916,15 @@ static void amp_voyetra(struct cs_card *card, int change)
/* Manage the EAPD bit on the Crystal 4297
and the Analog AD1885 */
- int old=card->amplifier;
+ int old = card->amplifier;
card->amplifier+=change;
- if(card->amplifier && !old)
- {
+ if (card->amplifier && !old) {
/* Turn the EAPD amp on */
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL,
cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) |
0x8000);
- }
- else if(old && !card->amplifier)
- {
+ } else if (old && !card->amplifier) {
/* Turn the EAPD amp off */
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL,
cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
@@ -3083,25 +2939,21 @@ static void amp_voyetra(struct cs_card *card, int change)
static void amp_hercules(struct cs_card *card, int change)
{
- int old=card->amplifier;
- if(!card)
- {
+ int old = card->amplifier;
+ if (!card) {
CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
"cs46xx: amp_hercules() called before initialized.\n"));
return;
}
card->amplifier+=change;
- if( (card->amplifier && !old) && !(hercules_egpio_disable))
- {
+ if ((card->amplifier && !old) && !(hercules_egpio_disable)) {
CS_DBGOUT(CS_PARMS, 4, printk(KERN_INFO
"cs46xx: amp_hercules() external amp enabled\n"));
cs461x_pokeBA0(card, BA0_EGPIODR,
EGPIODR_GPOE2); /* enable EGPIO2 output */
cs461x_pokeBA0(card, BA0_EGPIOPTR,
EGPIOPTR_GPPT2); /* open-drain on output */
- }
- else if(old && !card->amplifier)
- {
+ } else if (old && !card->amplifier) {
CS_DBGOUT(CS_PARMS, 4, printk(KERN_INFO
"cs46xx: amp_hercules() external amp disabled\n"));
cs461x_pokeBA0(card, BA0_EGPIODR, 0); /* disable */
@@ -3124,31 +2976,28 @@ static void clkrun_hack(struct cs_card *card, int change)
u16 control;
u8 pp;
unsigned long port;
- int old=card->active;
+ int old = card->active;
card->active+=change;
acpi_dev = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
- if(acpi_dev == NULL)
+ if (acpi_dev == NULL)
return; /* Not a thinkpad thats for sure */
/* Find the control port */
pci_read_config_byte(acpi_dev, 0x41, &pp);
- port=pp<<8;
+ port = pp << 8;
/* Read ACPI port */
- control=inw(port+0x10);
+ control = inw(port + 0x10);
/* Flip CLKRUN off while running */
- if(!card->active && old)
- {
+ if (!card->active && old) {
CS_DBGOUT(CS_PARMS , 9, printk( KERN_INFO
"cs46xx: clkrun() enable clkrun - change=%d active=%d\n",
change,card->active));
outw(control|0x2000, port+0x10);
- }
- else
- {
+ } else {
/*
* sometimes on a resume the bit is set, so always reset the bit.
*/
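A condensed sketch of the CLKRUN handling above, based only on the logic shown: bit 0x2000 of the register at the PIIX4 ACPI I/O base + 0x10 gates CLKRUN, cleared while audio is running and set again once the card goes idle. It assumes the driver's kernel context for inw()/outw() and that acpi_base is the port computed above (pp << 8).

static void clkrun_set(unsigned long acpi_base, int clkrun_on)
{
	/* sketch only; not part of the patch */
	u16 control = inw(acpi_base + 0x10);

	if (clkrun_on)
		outw(control | 0x2000, acpi_base + 0x10);	/* card idle */
	else
		outw(control & ~0x2000, acpi_base + 0x10);	/* audio running */
}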
@@ -3162,20 +3011,19 @@ static void clkrun_hack(struct cs_card *card, int change)
static int cs_open(struct inode *inode, struct file *file)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state = NULL;
struct dmabuf *dmabuf = NULL;
struct list_head *entry;
unsigned int minor = iminor(inode);
- int ret=0;
+ int ret = 0;
unsigned int tmp;
CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=%p %s %s\n",
file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
- list_for_each(entry, &cs46xx_devs)
- {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
if (!((card->dev_audio ^ minor) & ~0xf))
@@ -3192,11 +3040,10 @@ static int cs_open(struct inode *inode, struct file *file)
/*
* hardcode state[0] for capture, [1] for playback
*/
- if(file->f_mode & FMODE_READ)
- {
+ if (file->f_mode & FMODE_READ) {
CS_DBGOUT(CS_WAVE_READ, 2, printk("cs46xx: cs_open() FMODE_READ\n") );
if (card->states[0] == NULL) {
- state = card->states[0] = (struct cs_state *)
+ state = card->states[0] =
kmalloc(sizeof(struct cs_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -3204,36 +3051,32 @@ static int cs_open(struct inode *inode, struct file *file)
mutex_init(&state->sem);
dmabuf = &state->dmabuf;
dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if(dmabuf->pbuf==NULL)
- {
+ if (dmabuf->pbuf == NULL) {
kfree(state);
- card->states[0]=NULL;
+ card->states[0] = NULL;
return -ENOMEM;
}
- }
- else
- {
+ } else {
state = card->states[0];
- if(state->open_mode & FMODE_READ)
+ if (state->open_mode & FMODE_READ)
return -EBUSY;
}
dmabuf->channel = card->alloc_rec_pcm_channel(card);
if (dmabuf->channel == NULL) {
- kfree (card->states[0]);
+ kfree(card->states[0]);
card->states[0] = NULL;
return -ENODEV;
}
/* Now turn on external AMP if needed */
state->card = card;
- state->card->active_ctrl(state->card,1);
- state->card->amplifier_ctrl(state->card,1);
+ state->card->active_ctrl(state->card, 1);
+ state->card->amplifier_ctrl(state->card, 1);
- if( (tmp = cs46xx_powerup(card, CS_POWER_ADC)) )
- {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_ADC))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs46xx_powerup of ADC failed (0x%x)\n",tmp) );
+ "cs46xx: cs46xx_powerup of ADC failed (0x%x)\n", tmp));
return -EIO;
}
@@ -3263,11 +3106,10 @@ static int cs_open(struct inode *inode, struct file *file)
state->open_mode |= FMODE_READ;
mutex_unlock(&state->open_mutex);
}
- if(file->f_mode & FMODE_WRITE)
- {
+ if (file->f_mode & FMODE_WRITE) {
CS_DBGOUT(CS_OPEN, 2, printk("cs46xx: cs_open() FMODE_WRITE\n") );
if (card->states[1] == NULL) {
- state = card->states[1] = (struct cs_state *)
+ state = card->states[1] =
kmalloc(sizeof(struct cs_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -3275,36 +3117,32 @@ static int cs_open(struct inode *inode, struct file *file)
mutex_init(&state->sem);
dmabuf = &state->dmabuf;
dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if(dmabuf->pbuf==NULL)
- {
+ if (dmabuf->pbuf == NULL) {
kfree(state);
- card->states[1]=NULL;
+ card->states[1] = NULL;
return -ENOMEM;
}
- }
- else
- {
+ } else {
state = card->states[1];
- if(state->open_mode & FMODE_WRITE)
+ if (state->open_mode & FMODE_WRITE)
return -EBUSY;
}
dmabuf->channel = card->alloc_pcm_channel(card);
if (dmabuf->channel == NULL) {
- kfree (card->states[1]);
+ kfree(card->states[1]);
card->states[1] = NULL;
return -ENODEV;
}
/* Now turn on external AMP if needed */
state->card = card;
- state->card->active_ctrl(state->card,1);
- state->card->amplifier_ctrl(state->card,1);
+ state->card->active_ctrl(state->card, 1);
+ state->card->amplifier_ctrl(state->card, 1);
- if( (tmp = cs46xx_powerup(card, CS_POWER_DAC)) )
- {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_DAC))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs46xx_powerup of DAC failed (0x%x)\n",tmp) );
+ "cs46xx: cs46xx_powerup of DAC failed (0x%x)\n", tmp));
return -EIO;
}
@@ -3333,33 +3171,29 @@ static int cs_open(struct inode *inode, struct file *file)
state->open_mode |= FMODE_WRITE;
mutex_unlock(&state->open_mutex);
- if((ret = prog_dmabuf(state)))
+ if ((ret = prog_dmabuf(state)))
return ret;
}
- CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()- 0\n") );
+ CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()- 0\n"));
return nonseekable_open(inode, file);
}
static int cs_release(struct inode *inode, struct file *file)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct dmabuf *dmabuf;
struct cs_state *state;
unsigned int tmp;
CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=%p %s %s\n",
file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
- file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
+ file->f_mode & FMODE_READ ? "FMODE_READ" : ""));
if (!(file->f_mode & (FMODE_WRITE | FMODE_READ)))
- {
return -EINVAL;
- }
state = card->states[1];
- if(state)
- {
- if ( (state->open_mode & FMODE_WRITE) & (file->f_mode & FMODE_WRITE) )
- {
- CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_WRITE\n") );
+ if (state) {
+ if ((state->open_mode & FMODE_WRITE) & (file->f_mode & FMODE_WRITE)) {
+ CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_WRITE\n"));
dmabuf = &state->dmabuf;
cs_clear_tail(state);
drain_dac(state, file->f_flags & O_NONBLOCK);
@@ -3375,8 +3209,7 @@ static int cs_release(struct inode *inode, struct file *file)
state->card->states[state->virt] = NULL;
state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC, CS_FALSE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC, CS_FALSE))) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
"cs46xx: cs_release_mixdev() powerdown DAC failure (0x%x)\n",tmp) );
}
@@ -3384,17 +3217,14 @@ static int cs_release(struct inode *inode, struct file *file)
/* Now turn off external AMP if needed */
state->card->amplifier_ctrl(state->card, -1);
state->card->active_ctrl(state->card, -1);
-
kfree(state);
}
}
state = card->states[0];
- if(state)
- {
- if ( (state->open_mode & FMODE_READ) & (file->f_mode & FMODE_READ) )
- {
- CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n") );
+ if (state) {
+ if ((state->open_mode & FMODE_READ) & (file->f_mode & FMODE_READ)) {
+ CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n"));
dmabuf = &state->dmabuf;
mutex_lock(&state->open_mutex);
stop_adc(state);
@@ -3407,8 +3237,7 @@ static int cs_release(struct inode *inode, struct file *file)
state->card->states[state->virt] = NULL;
state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
- if( (tmp = cs461x_powerdown(card, CS_POWER_ADC, CS_FALSE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_ADC, CS_FALSE))) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
"cs46xx: cs_release_mixdev() powerdown ADC failure (0x%x)\n",tmp) );
}
@@ -3416,12 +3245,11 @@ static int cs_release(struct inode *inode, struct file *file)
/* Now turn off external AMP if needed */
state->card->amplifier_ctrl(state->card, -1);
state->card->active_ctrl(state->card, -1);
-
kfree(state);
}
}
- CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk("cs46xx: cs_release()- 0\n") );
+ CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk("cs46xx: cs_release()- 0\n"));
return 0;
}
@@ -3474,21 +3302,18 @@ static void cs46xx_ac97_suspend(struct cs_card *card)
CS_DBGOUT(CS_PM, 9, printk("cs46xx: cs46xx_ac97_suspend()+\n"));
- if(card->states[1])
- {
+ if (card->states[1]) {
stop_dac(card->states[1]);
resync_dma_ptrs(card->states[1]);
}
- if(card->states[0])
- {
+ if (card->states[0]) {
stop_adc(card->states[0]);
resync_dma_ptrs(card->states[0]);
}
- for(Count = 0x2, i=0; (Count <= CS46XX_AC97_HIGHESTREGTORESTORE)
- && (i < CS46XX_AC97_NUMBER_RESTORE_REGS);
- Count += 2, i++)
- {
+ for (Count = 0x2, i = 0; (Count <= CS46XX_AC97_HIGHESTREGTORESTORE)
+ && (i < CS46XX_AC97_NUMBER_RESTORE_REGS);
+ Count += 2, i++) {
card->pm.ac97[i] = cs_ac97_get(dev, BA0_AC97_RESET + Count);
}
/*
@@ -3522,11 +3347,10 @@ static void cs46xx_ac97_suspend(struct cs_card *card)
* well, for now, only power down the DAC/ADC and MIXER VREFON components.
* trouble with removing VREF.
*/
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON, CS_TRUE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON, CS_TRUE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs46xx_ac97_suspend() failure (0x%x)\n",tmp) );
+ "cs46xx: cs46xx_ac97_suspend() failure (0x%x)\n",tmp));
}
CS_DBGOUT(CS_PM, 9, printk("cs46xx: cs46xx_ac97_suspend()-\n"));
@@ -3566,16 +3390,13 @@ static void cs46xx_ac97_resume(struct cs_card *card)
* Restore just the first set of registers, from register number
* 0x02 to the register number that ulHighestRegToRestore specifies.
*/
- for( Count = 0x2, i=0;
- (Count <= CS46XX_AC97_HIGHESTREGTORESTORE)
- && (i < CS46XX_AC97_NUMBER_RESTORE_REGS);
- Count += 2, i++)
- {
+ for (Count = 0x2, i = 0; (Count <= CS46XX_AC97_HIGHESTREGTORESTORE) &&
+ (i < CS46XX_AC97_NUMBER_RESTORE_REGS); Count += 2, i++) {
cs_ac97_set(dev, (u8)(BA0_AC97_RESET + Count), (u16)card->pm.ac97[i]);
}
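Both the suspend loop and this resume loop walk the AC'97 register file two bytes at a time, since the codec registers are 16 bits wide at even offsets, while the save-array index advances by one. A standalone illustration of that indexing (register range and array size assumed):

#include <stdio.h>

int main(void)
{
	unsigned int reg, i;

	/* pairs register offsets 0x02, 0x04, ... with array slots 0, 1, ... */
	for (reg = 0x02, i = 0; reg <= 0x0a && i < 8; reg += 2, i++)
		printf("ac97[%u] <-> codec register 0x%02x\n", i, reg);
	return 0;
}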
/* Check if we have to init the amplifier */
- if(card->amp_init)
+ if (card->amp_init)
card->amp_init(card);
CS_DBGOUT(CS_PM, 9, printk("cs46xx: cs46xx_ac97_resume()-\n"));
@@ -3585,30 +3406,27 @@ static void cs46xx_ac97_resume(struct cs_card *card)
static int cs46xx_restart_part(struct cs_card *card)
{
struct dmabuf *dmabuf;
+
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
printk( "cs46xx: cs46xx_restart_part()+\n"));
- if(card->states[1])
- {
+ if (card->states[1]) {
dmabuf = &card->states[1]->dmabuf;
dmabuf->ready = 0;
resync_dma_ptrs(card->states[1]);
cs_set_divisor(dmabuf);
- if(__prog_dmabuf(card->states[1]))
- {
+ if (__prog_dmabuf(card->states[1])) {
CS_DBGOUT(CS_PM | CS_ERROR, 1,
printk("cs46xx: cs46xx_restart_part()- (-1) prog_dmabuf() dac error\n"));
return -1;
}
cs_set_dac_rate(card->states[1], dmabuf->rate);
}
- if(card->states[0])
- {
+ if (card->states[0]) {
dmabuf = &card->states[0]->dmabuf;
dmabuf->ready = 0;
resync_dma_ptrs(card->states[0]);
cs_set_divisor(dmabuf);
- if(__prog_dmabuf(card->states[0]))
- {
+ if (__prog_dmabuf(card->states[0])) {
CS_DBGOUT(CS_PM | CS_ERROR, 1,
printk("cs46xx: cs46xx_restart_part()- (-1) prog_dmabuf() adc error\n"));
return -1;
@@ -3616,17 +3434,17 @@ static int cs46xx_restart_part(struct cs_card *card)
cs_set_adc_rate(card->states[0], dmabuf->rate);
}
card->pm.flags |= CS46XX_PM_RESUMED;
- if(card->states[0])
+ if (card->states[0])
start_adc(card->states[0]);
- if(card->states[1])
+ if (card->states[1])
start_dac(card->states[1]);
card->pm.flags |= CS46XX_PM_IDLE;
card->pm.flags &= ~(CS46XX_PM_SUSPENDING | CS46XX_PM_SUSPENDED
| CS46XX_PM_RESUMING | CS46XX_PM_RESUMED);
- if(card->states[0])
+ if (card->states[0])
wake_up(&card->states[0]->dmabuf.wait);
- if(card->states[1])
+ if (card->states[1])
wake_up(&card->states[1]->dmabuf.wait);
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
@@ -3634,20 +3452,19 @@ static int cs46xx_restart_part(struct cs_card *card)
return 0;
}
-
static void cs461x_reset(struct cs_card *card);
static void cs461x_proc_stop(struct cs_card *card);
static int cs46xx_suspend(struct cs_card *card, pm_message_t state)
{
unsigned int tmp;
+
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=%p\n",
(unsigned)card->pm.flags,card));
/*
* check the current state, only suspend if IDLE
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
- {
+ if (!(card->pm.flags & CS46XX_PM_IDLE)) {
CS_DBGOUT(CS_PM | CS_ERROR, 2,
printk("cs46xx: cs46xx_suspend() unable to suspend, not IDLE\n"));
return 1;
@@ -3679,13 +3496,11 @@ static int cs46xx_suspend(struct cs_card *card, pm_message_t state)
tmp = cs461x_peek(card, BA1_CCTL);
cs461x_poke(card, BA1_CCTL, tmp & 0xffff0000);
- if(card->states[1])
- {
+ if (card->states[1]) {
card->pm.dmabuf_swptr_play = card->states[1]->dmabuf.swptr;
card->pm.dmabuf_count_play = card->states[1]->dmabuf.count;
}
- if(card->states[0])
- {
+ if (card->states[0]) {
card->pm.dmabuf_swptr_capture = card->states[0]->dmabuf.swptr;
card->pm.dmabuf_count_capture = card->states[0]->dmabuf.count;
}
@@ -3736,8 +3551,7 @@ static int cs46xx_resume(struct cs_card *card)
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
printk( "cs46xx: cs46xx_resume()+ flags=0x%x\n",
(unsigned)card->pm.flags));
- if(!(card->pm.flags & CS46XX_PM_SUSPENDED))
- {
+ if (!(card->pm.flags & CS46XX_PM_SUSPENDED)) {
CS_DBGOUT(CS_PM | CS_ERROR, 2,
printk("cs46xx: cs46xx_resume() unable to resume, not SUSPENDED\n"));
return 1;
@@ -3747,10 +3561,8 @@ static int cs46xx_resume(struct cs_card *card)
printpm(card);
card->active_ctrl(card, 1);
- for(i=0;i<5;i++)
- {
- if (cs_hardware_init(card) != 0)
- {
+ for (i = 0; i < 5; i++) {
+ if (cs_hardware_init(card) != 0) {
CS_DBGOUT(CS_PM | CS_ERROR, 4, printk(
"cs46xx: cs46xx_resume()- ERROR in cs_hardware_init()\n"));
mdelay(10 * cs_laptop_wait);
@@ -3759,15 +3571,13 @@ static int cs46xx_resume(struct cs_card *card)
}
break;
}
- if(i>=4)
- {
+ if (i >= 4) {
CS_DBGOUT(CS_PM | CS_ERROR, 1, printk(
"cs46xx: cs46xx_resume()- cs_hardware_init() failed, retried %d times.\n",i));
return 0;
}
- if(cs46xx_restart_part(card))
- {
+ if (cs46xx_restart_part(card)) {
CS_DBGOUT(CS_PM | CS_ERROR, 4, printk(
"cs46xx: cs46xx_resume(): cs46xx_restart_part() returned error\n"));
}
@@ -3835,7 +3645,7 @@ static u16 _cs_ac97_get(struct ac97_codec *dev, u8 reg)
/*
* Wait for the read to occur.
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
+ if (!(card->pm.flags & CS46XX_PM_IDLE))
loopcnt = 2000;
else
loopcnt = 500 * cs_laptop_wait;
@@ -3866,7 +3676,7 @@ static u16 _cs_ac97_get(struct ac97_codec *dev, u8 reg)
* Wait for the valid status bit to go active.
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
+ if (!(card->pm.flags & CS46XX_PM_IDLE))
loopcnt = 2000;
else
loopcnt = 1000;
@@ -3885,7 +3695,7 @@ static u16 _cs_ac97_get(struct ac97_codec *dev, u8 reg)
/*
* Make sure we got valid status.
*/
- if (!( (tmp=cs461x_peekBA0(card, BA0_ACSTS)) & ACSTS_VSTS)) {
+ if (!((tmp = cs461x_peekBA0(card, BA0_ACSTS)) & ACSTS_VSTS)) {
CS_DBGOUT(CS_ERROR, 2, printk(KERN_WARNING
"cs46xx: AC'97 read problem (ACSTS_VSTS), reg = 0x%x val=0x%x 0xffff \n",
reg, tmp));
@@ -3923,12 +3733,9 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
spin_lock(&card->ac97_lock);
- if(reg == AC97_CD_VOL)
- {
+ if (reg == AC97_CD_VOL)
val2 = _cs_ac97_get(dev, AC97_CD_VOL);
- }
-
-
+
/*
* 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address
* 2. Write ACCDA = Command Data Register = 470h for data to write to AC97
@@ -3970,8 +3777,7 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
/*
* Make sure the write completed.
*/
- if (cs461x_peekBA0(card, BA0_ACCTL) & ACCTL_DCV)
- {
+ if (cs461x_peekBA0(card, BA0_ACCTL) & ACCTL_DCV) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: AC'97 write problem, reg = 0x%x, val = 0x%x\n", reg, val));
}
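A rough sketch of the codec write handshake whose completion is checked above, using this driver's cs461x_pokeBA0()/cs461x_peekBA0() helpers; the register and flag names follow the numbered comment and the surrounding code in cs_ac97_set(), while the poll count and delay are assumptions:

static int cs_ac97_write_poll(struct cs_card *card, u8 reg, u16 val)
{
	int i;

	cs461x_pokeBA0(card, BA0_ACCAD, reg);	/* 1. command address */
	cs461x_pokeBA0(card, BA0_ACCDA, val);	/* 2. command data */
	/* 3. start the cycle: DCV stays set until the controller is done */
	cs461x_pokeBA0(card, BA0_ACCTL,
		       ACCTL_DCV | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN);

	for (i = 0; i < 1000; i++) {		/* 4. poll for completion */
		udelay(10);
		if (!(cs461x_peekBA0(card, BA0_ACCTL) & ACCTL_DCV))
			return 0;
	}
	return -1;				/* DCV never cleared */
}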
@@ -3998,25 +3804,23 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
/* CD mute change ? */
- if(reg==AC97_CD_VOL)
- {
+ if (reg == AC97_CD_VOL) {
/* Mute bit change ? */
- if((val2^val)&0x8000 || ((val2 == 0x1f1f || val == 0x1f1f) && val2 != val))
- {
+ if ((val2 ^ val) & 0x8000 ||
+ ((val2 == 0x1f1f || val == 0x1f1f) && val2 != val)) {
/* This is a hack but its cleaner than the alternatives.
Right now card->ac97_codec[0] might be NULL as we are
still doing codec setup. This does an early assignment
to avoid the problem if it occurs */
- if(card->ac97_codec[0]==NULL)
- card->ac97_codec[0]=dev;
+ if (card->ac97_codec[0] == NULL)
+ card->ac97_codec[0] = dev;
/* Mute on */
- if(val&0x8000 || val == 0x1f1f)
+ if (val & 0x8000 || val == 0x1f1f)
card->amplifier_ctrl(card, -1);
- else /* Mute off power on */
- {
- if(card->amp_init)
+ else { /* Mute off power on */
+ if (card->amp_init)
card->amp_init(card);
card->amplifier_ctrl(card, 1);
}
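For reference, the test above, (val2 ^ val) & 0x8000, fires exactly when bit 15, the AC'97 mute bit, differs between the old and the new CD volume value. A standalone illustration (the volume values are assumed):

#include <stdio.h>

int main(void)
{
	unsigned int old_vol = 0x8808;	/* muted, gain 0x08/0x08 */
	unsigned int new_vol = 0x0808;	/* unmuted, same gain */

	if ((old_vol ^ new_vol) & 0x8000)
		printf("mute bit changed\n");	/* printed for this pair */
	return 0;
}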
@@ -4024,46 +3828,41 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
}
}
-
/* OSS /dev/mixer file operation methods */
static int cs_open_mixdev(struct inode *inode, struct file *file)
{
- int i=0;
+ int i = 0;
unsigned int minor = iminor(inode);
- struct cs_card *card=NULL;
+ struct cs_card *card = NULL;
struct list_head *entry;
unsigned int tmp;
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs46xx: cs_open_mixdev()+\n"));
- list_for_each(entry, &cs46xx_devs)
- {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
for (i = 0; i < NR_AC97; i++)
if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor)
goto match;
}
- if (!card)
- {
+ if (!card) {
CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2,
printk(KERN_INFO "cs46xx: cs46xx_open_mixdev()- -ENODEV\n"));
return -ENODEV;
}
match:
- if(!card->ac97_codec[i])
+ if (!card->ac97_codec[i])
return -ENODEV;
file->private_data = card->ac97_codec[i];
card->active_ctrl(card,1);
- if(!CS_IN_USE(&card->mixer_use_cnt))
- {
- if( (tmp = cs46xx_powerup(card, CS_POWER_MIXVON )) )
- {
+ if (!CS_IN_USE(&card->mixer_use_cnt)) {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_MIXVON))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs_open_mixdev() powerup failure (0x%x)\n",tmp) );
+ "cs46xx: cs_open_mixdev() powerup failure (0x%x)\n", tmp));
return -EIO;
}
}
@@ -4077,7 +3876,7 @@ static int cs_open_mixdev(struct inode *inode, struct file *file)
static int cs_release_mixdev(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
- struct cs_card *card=NULL;
+ struct cs_card *card = NULL;
struct list_head *entry;
int i;
unsigned int tmp;
@@ -4092,15 +3891,13 @@ static int cs_release_mixdev(struct inode *inode, struct file *file)
card->ac97_codec[i]->dev_mixer == minor)
goto match;
}
- if (!card)
- {
+ if (!card) {
CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2,
printk(KERN_INFO "cs46xx: cs46xx_open_mixdev()- -ENODEV\n"));
return -ENODEV;
}
match:
- if(!CS_DEC_AND_TEST(&card->mixer_use_cnt))
- {
+ if (!CS_DEC_AND_TEST(&card->mixer_use_cnt)) {
CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 4,
printk(KERN_INFO "cs46xx: cs_release_mixdev()- no powerdown, usecnt>0\n"));
card->active_ctrl(card, -1);
@@ -4110,10 +3907,9 @@ match:
/*
* ok, no outstanding mixer opens, so powerdown.
*/
- if( (tmp = cs461x_powerdown(card, CS_POWER_MIXVON, CS_FALSE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_MIXVON, CS_FALSE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs_release_mixdev() powerdown MIXVON failure (0x%x)\n",tmp) );
+ "cs46xx: cs_release_mixdev() powerdown MIXVON failure (0x%x)\n", tmp));
card->active_ctrl(card, -1);
card->amplifier_ctrl(card, -1);
return -EIO;
@@ -4126,76 +3922,60 @@ match:
}
static int cs_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
- struct ac97_codec *codec = (struct ac97_codec *)file->private_data;
- struct cs_card *card=NULL;
+ struct ac97_codec *codec = file->private_data;
+ struct cs_card *card = NULL;
struct list_head *entry;
unsigned long __user *p = (long __user *)arg;
-
#if CSDEBUG_INTERFACE
int val;
- if( (cmd == SOUND_MIXER_CS_GETDBGMASK) ||
+ if ((cmd == SOUND_MIXER_CS_GETDBGMASK) ||
(cmd == SOUND_MIXER_CS_SETDBGMASK) ||
(cmd == SOUND_MIXER_CS_GETDBGLEVEL) ||
(cmd == SOUND_MIXER_CS_SETDBGLEVEL) ||
- (cmd == SOUND_MIXER_CS_APM))
- {
- switch(cmd)
- {
-
+ (cmd == SOUND_MIXER_CS_APM)) {
+ switch (cmd) {
case SOUND_MIXER_CS_GETDBGMASK:
return put_user(cs_debugmask, p);
-
case SOUND_MIXER_CS_GETDBGLEVEL:
return put_user(cs_debuglevel, p);
-
case SOUND_MIXER_CS_SETDBGMASK:
if (get_user(val, p))
return -EFAULT;
cs_debugmask = val;
return 0;
-
case SOUND_MIXER_CS_SETDBGLEVEL:
if (get_user(val, p))
return -EFAULT;
cs_debuglevel = val;
return 0;
-
case SOUND_MIXER_CS_APM:
if (get_user(val, p))
return -EFAULT;
- if(val == CS_IOCTL_CMD_SUSPEND)
- {
- list_for_each(entry, &cs46xx_devs)
- {
+ if (val == CS_IOCTL_CMD_SUSPEND) {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
cs46xx_suspend(card, PMSG_ON);
}
- }
- else if(val == CS_IOCTL_CMD_RESUME)
- {
- list_for_each(entry, &cs46xx_devs)
- {
+ } else if (val == CS_IOCTL_CMD_RESUME) {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
cs46xx_resume(card);
}
- }
- else
- {
+ } else {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
"cs46xx: mixer_ioctl(): invalid APM cmd (%d)\n",
val));
}
return 0;
-
default:
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
- "cs46xx: mixer_ioctl(): ERROR unknown debug cmd\n") );
+ "cs46xx: mixer_ioctl(): ERROR unknown debug cmd\n"));
return 0;
- }
+ }
}
#endif
return codec->mixer_ioctl(codec, cmd, arg);
@@ -4232,8 +4012,7 @@ static int __init cs_ac97_init(struct cs_card *card)
codec->codec_read = cs_ac97_get;
codec->codec_write = cs_ac97_set;
- if (ac97_probe_codec(codec) == 0)
- {
+ if (ac97_probe_codec(codec) == 0) {
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
"cs46xx: cs_ac97_init()- codec number %d not found\n",
num_ac97) );
@@ -4241,12 +4020,11 @@ static int __init cs_ac97_init(struct cs_card *card)
break;
}
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
- "cs46xx: cs_ac97_init() found codec %d\n",num_ac97) );
+ "cs46xx: cs_ac97_init() found codec %d\n",num_ac97));
eid = cs_ac97_get(codec, AC97_EXTENDED_ID);
- if(eid==0xFFFF)
- {
+ if (eid == 0xFFFF) {
printk(KERN_WARNING "cs46xx: codec %d not present\n",num_ac97);
ac97_release_codec(codec);
break;
@@ -4285,27 +4063,23 @@ static void cs461x_download_image(struct cs_card *card)
{
unsigned i, j, temp1, temp2, offset, count;
unsigned char __iomem *pBA1 = ioremap(card->ba1_addr, 0x40000);
- for( i=0; i < CLEAR__COUNT; i++)
- {
+ for (i = 0; i < CLEAR__COUNT; i++) {
offset = ClrStat[i].BA1__DestByteOffset;
count = ClrStat[i].BA1__SourceSize;
- for( temp1 = offset; temp1<(offset+count); temp1+=4 )
+ for (temp1 = offset; temp1 < (offset + count); temp1 += 4)
writel(0, pBA1+temp1);
}
- for(i=0; i<FILL__COUNT; i++)
- {
+ for (i = 0; i < FILL__COUNT; i++) {
temp2 = FillStat[i].Offset;
- for(j=0; j<(FillStat[i].Size)/4; j++)
- {
+ for (j = 0; j < (FillStat[i].Size) / 4; j++) {
temp1 = (FillStat[i]).pFill[j];
- writel(temp1, pBA1+temp2+j*4);
+ writel(temp1, pBA1 + temp2 + j * 4);
}
}
iounmap(pBA1);
}
-
/*
* Chip reset
*/
@@ -4365,15 +4139,13 @@ static void cs461x_clear_serial_FIFOs(struct cs_card *card, int type)
* playing or capturing then we don't want to put in 128 bytes of
* "noise".
*/
- if(type & CS_TYPE_DAC)
- {
+ if (type & CS_TYPE_DAC) {
startfifo = 128;
endfifo = 256;
}
- if(type & CS_TYPE_ADC)
- {
+ if (type & CS_TYPE_ADC) {
startfifo = 0;
- if(!endfifo)
+ if (!endfifo)
endfifo = 128;
}
/*
@@ -4417,8 +4189,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
"cs46xx: cs461x_powerdown()+ type=0x%x\n",type));
- if(!cs_powerdown && !suspendflag)
- {
+ if (!cs_powerdown && !suspendflag) {
CS_DBGOUT(CS_FUNCTION, 8, printk(KERN_INFO
"cs46xx: cs461x_powerdown() DISABLED exiting\n"));
return 0;
@@ -4432,12 +4203,11 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* currently powered down. If powering down DAC and ADC, then
* it is possible to power down the VREF (ON).
*/
- if ( ((type & CS_POWER_MIXVON) &&
- (!(type & CS_POWER_ADC) || (!(type & CS_POWER_DAC))) )
+ if (((type & CS_POWER_MIXVON) &&
+ (!(type & CS_POWER_ADC) || (!(type & CS_POWER_DAC))))
&&
((tmp & CS_AC97_POWER_CONTROL_ADC_ON) ||
- (tmp & CS_AC97_POWER_CONTROL_DAC_ON) ) )
- {
+ (tmp & CS_AC97_POWER_CONTROL_DAC_ON))) {
CS_DBGOUT(CS_FUNCTION, 8, printk(KERN_INFO
"cs46xx: cs461x_powerdown()- 0 unable to powerdown. tmp=0x%x\n",tmp));
return 0;
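The guard above keeps VREF (MIXVON) up while either converter is still powered: it bails out when MIXVON powerdown is requested without both the ADC and the DAC, and at least one of them is currently on. A standalone restatement of that condition (flag values assumed):

#include <stdio.h>

int main(void)
{
	int wants_mixvon = 1, wants_adc = 0, wants_dac = 1;	/* request */
	int adc_on = 1, dac_on = 0;				/* codec status */

	if (wants_mixvon && (!wants_adc || !wants_dac) && (adc_on || dac_on))
		printf("skip: a converter still needs VREF\n");	/* printed here */
	return 0;
}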
@@ -4452,8 +4222,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
/*
* Power down indicated areas.
*/
- if(type & CS_POWER_MIXVOFF)
- {
+ if (type & CS_POWER_MIXVOFF) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs461x_powerdown()+ MIXVOFF\n"));
@@ -4461,12 +4230,10 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Power down the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_MIXVOFF;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4492,16 +4259,14 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVOFF_ON)
- {
+ CS_AC97_POWER_CONTROL_MIXVOFF_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown MIXVOFF failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_MIXVON)
- {
+ if (type & CS_POWER_MIXVON) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs461x_powerdown()+ MIXVON\n"));
@@ -4509,15 +4274,13 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Power down the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_MIXVON_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_MIXVON_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_MIXVON;
- cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
+ cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp);
/*
* Now, we wait until we sample a ready state.
*/
@@ -4540,30 +4303,26 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVON_ON)
- {
+ CS_AC97_POWER_CONTROL_MIXVON_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown MIXVON failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_ADC)
- {
+ if (type & CS_POWER_ADC) {
/*
* Power down the ADC on the AC97 card.
*/
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO "cs46xx: cs461x_powerdown()+ ADC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_ADC_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_ADC_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_ADC;
- cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
+ cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp);
/*
* Now, we wait until we sample a ready state.
@@ -4587,16 +4346,14 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_ADC_ON)
- {
+ CS_AC97_POWER_CONTROL_ADC_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown ADC failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_DAC)
- {
+ if (type & CS_POWER_DAC) {
/*
* Power down the DAC on the AC97 card.
*/
@@ -4604,15 +4361,13 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs461x_powerdown()+ DAC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_DAC_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_DAC_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_DAC;
- cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
+ cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp);
/*
* Now, we wait until we sample a ready state.
*/
@@ -4635,8 +4390,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_DAC_ON)
- {
+ CS_AC97_POWER_CONTROL_DAC_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown DAC failed\n"));
return 1;
@@ -4644,7 +4398,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
}
}
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if(muted)
+ if (muted)
cs_mute(card, CS_FALSE);
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
"cs46xx: cs461x_powerdown()- 0 tmp=0x%x\n",tmp));
@@ -4654,23 +4408,22 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
static int cs46xx_powerup(struct cs_card *card, unsigned int type)
{
int count;
- unsigned int tmp=0,muted=0;
+ unsigned int tmp = 0, muted = 0;
CS_DBGOUT(CS_FUNCTION, 8, printk(KERN_INFO
"cs46xx: cs46xx_powerup()+ type=0x%x\n",type));
/*
* check for VREF and powerup if need to.
*/
- if(type & CS_POWER_MIXVON)
+ if (type & CS_POWER_MIXVON)
type |= CS_POWER_MIXVOFF;
- if(type & (CS_POWER_DAC | CS_POWER_ADC))
+ if (type & (CS_POWER_DAC | CS_POWER_ADC))
type |= CS_POWER_MIXVON | CS_POWER_MIXVOFF;
/*
* Power up indicated areas.
*/
- if(type & CS_POWER_MIXVOFF)
- {
+ if (type & CS_POWER_MIXVOFF) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs46xx_powerup()+ MIXVOFF\n"));
@@ -4678,12 +4431,10 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Power up the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_MIXVOFF;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4709,16 +4460,14 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVOFF_ON))
- {
+ CS_AC97_POWER_CONTROL_MIXVOFF_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup MIXVOFF failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_MIXVON)
- {
+ if (type & CS_POWER_MIXVON) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs46xx_powerup()+ MIXVON\n"));
@@ -4726,12 +4475,10 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Power up the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_MIXVON_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_MIXVON_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_MIXVON;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4757,27 +4504,23 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVON_ON))
- {
+ CS_AC97_POWER_CONTROL_MIXVON_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup MIXVON failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_ADC)
- {
+ if (type & CS_POWER_ADC) {
/*
* Power up the ADC on the AC97 card.
*/
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO "cs46xx: cs46xx_powerup()+ ADC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_ADC_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_ADC_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_ADC;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4804,16 +4547,14 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_ADC_ON))
- {
+ CS_AC97_POWER_CONTROL_ADC_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup ADC failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_DAC)
- {
+ if (type & CS_POWER_DAC) {
/*
* Power up the DAC on the AC97 card.
*/
@@ -4821,12 +4562,10 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs46xx_powerup()+ DAC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_DAC_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_DAC_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_DAC;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4852,8 +4591,7 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_DAC_ON))
- {
+ CS_AC97_POWER_CONTROL_DAC_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup DAC failed\n"));
return 1;
@@ -4861,14 +4599,13 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
}
}
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if(muted)
+ if (muted)
cs_mute(card, CS_FALSE);
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
"cs46xx: cs46xx_powerup()- 0 tmp=0x%x\n",tmp));
return 0;
}
-
static void cs461x_proc_start(struct cs_card *card)
{
int cnt;
@@ -4965,7 +4702,7 @@ static int cs_hardware_init(struct cs_card *card)
* is not enough for some platforms! tested on an IBM Thinkpads and
* reference cards.
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
+ if (!(card->pm.flags & CS46XX_PM_IDLE))
mdelay(initdelay);
/*
* Write the selected clock control setup to the hardware. Do not turn on
@@ -5017,8 +4754,7 @@ static int cs_hardware_init(struct cs_card *card)
* If we are resuming under 2.2.x then we can not schedule a timeout.
* so, just spin the CPU.
*/
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
/*
* Wait for the card ready signal from the AC97 card.
*/
@@ -5033,9 +4769,7 @@ static int cs_hardware_init(struct cs_card *card)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(1);
} while (time_before(jiffies, end_time));
- }
- else
- {
+ } else {
for (count = 0; count < 100; count++) {
// First, we want to wait for a short time.
udelay(25 * cs_laptop_wait);
@@ -5064,8 +4798,7 @@ static int cs_hardware_init(struct cs_card *card)
*/
cs461x_pokeBA0(card, BA0_ACCTL, ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN);
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
/*
* Wait until we've sampled input slots 3 and 4 as valid, meaning that
* the card is pumping ADC data across the AC-link.
@@ -5081,9 +4814,7 @@ static int cs_hardware_init(struct cs_card *card)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(1);
} while (time_before(jiffies, end_time));
- }
- else
- {
+ } else {
for (count = 0; count < 100; count++) {
// First, we want to wait for a short time.
udelay(25 * cs_laptop_wait);
@@ -5140,17 +4871,13 @@ static int cs_hardware_init(struct cs_card *card)
cs461x_poke(card, BA1_CCTL, tmp & 0xffff0000);
/* initialize AC97 codec and register /dev/mixer */
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
- if (cs_ac97_init(card) <= 0)
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
+ if (cs_ac97_init(card) <= 0) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs_ac97_init() failure\n") );
+ "cs46xx: cs_ac97_init() failure\n"));
return -EIO;
}
- }
- else
- {
+ } else {
cs46xx_ac97_resume(card);
}
@@ -5174,23 +4901,17 @@ static int cs_hardware_init(struct cs_card *card)
* If IDLE then Power down the part. We will power components up
* when we need them.
*/
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
- if(!cs_powerdown)
- {
- if( (tmp = cs46xx_powerup(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON )) )
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
+ if (!cs_powerdown) {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
"cs46xx: cs461x_powerup() failure (0x%x)\n",tmp) );
return -EIO;
}
- }
- else
- {
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON, CS_FALSE )) )
- {
+ } else {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON, CS_FALSE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
"cs46xx: cs461x_powerdown() failure (0x%x)\n",tmp) );
return -EIO;
@@ -5310,14 +5031,13 @@ MODULE_AUTHOR("Alan Cox <alan@redhat.com>, Jaroslav Kysela, <pcaudio@crystal.cir
MODULE_DESCRIPTION("Crystal SoundFusion Audio Support");
MODULE_LICENSE("GPL");
-
static const char cs46xx_banner[] = KERN_INFO "Crystal 4280/46xx + AC97 Audio, version " CS46XX_MAJOR_VERSION "." CS46XX_MINOR_VERSION "." CS46XX_ARCH ", " __TIME__ " " __DATE__ "\n";
static const char fndmsg[] = KERN_INFO "cs46xx: Found %d audio device(s).\n";
static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
const struct pci_device_id *pciid)
{
- int i,j;
+ int i, j;
u16 ss_card, ss_vendor;
struct cs_card *card;
dma_addr_t dma_mask;
@@ -5378,42 +5098,35 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
while (cp->name)
{
- if(cp->vendor == ss_vendor && cp->id == ss_card)
- {
+ if (cp->vendor == ss_vendor && cp->id == ss_card) {
card->amplifier_ctrl = cp->amp;
- if(cp->active)
+ if (cp->active)
card->active_ctrl = cp->active;
- if(cp->amp_init)
+ if (cp->amp_init)
card->amp_init = cp->amp_init;
break;
}
cp++;
}
- if (cp->name==NULL)
- {
+ if (cp->name == NULL) {
printk(KERN_INFO "cs46xx: Unknown card (%04X:%04X) at 0x%08lx/0x%08lx, IRQ %d\n",
ss_vendor, ss_card, card->ba0_addr, card->ba1_addr, card->irq);
- }
- else
- {
+ } else {
printk(KERN_INFO "cs46xx: %s (%04X:%04X) at 0x%08lx/0x%08lx, IRQ %d\n",
cp->name, ss_vendor, ss_card, card->ba0_addr, card->ba1_addr, card->irq);
}
- if (card->amplifier_ctrl==NULL)
- {
+ if (card->amplifier_ctrl == NULL) {
card->amplifier_ctrl = amp_none;
card->active_ctrl = clkrun_hack;
}
- if (external_amp == 1)
- {
+ if (external_amp == 1) {
printk(KERN_INFO "cs46xx: Crystal EAPD support forced on.\n");
card->amplifier_ctrl = amp_voyetra;
}
- if (thinkpad == 1)
- {
+ if (thinkpad == 1) {
printk(KERN_INFO "cs46xx: Activating CLKRUN hack for Thinkpad.\n");
card->active_ctrl = clkrun_hack;
}
@@ -5425,13 +5138,11 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
* and mdelay kernel code is replaced by a pm timer, or the delays
* work well for battery and/or AC power both.
*/
- if(card->active_ctrl == clkrun_hack)
- {
+ if (card->active_ctrl == clkrun_hack) {
initdelay = 2100;
cs_laptop_wait = 5;
}
- if((card->active_ctrl == clkrun_hack) && !(powerdown == 1))
- {
+ if ((card->active_ctrl == clkrun_hack) && !(powerdown == 1)) {
/*
* for some currently unknown reason, powering down the DAC and ADC component
* blocks on thinkpads causes some funky behavior... distoorrrtion and ac97
@@ -5440,7 +5151,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
*/
cs_powerdown = 0;
}
- if(powerdown == 0)
+ if (powerdown == 0)
cs_powerdown = 0;
card->active_ctrl(card, 1);
@@ -5461,7 +5172,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
card->ba1.name.pmem,
card->ba1.name.reg) );
- if(card->ba0 == 0 || card->ba1.name.data0 == 0 ||
+ if (card->ba0 == 0 || card->ba1.name.data0 == 0 ||
card->ba1.name.data1 == 0 || card->ba1.name.pmem == 0 ||
card->ba1.name.reg == 0)
goto fail2;
@@ -5477,14 +5188,12 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
}
/* register /dev/midi */
- if((card->dev_midi = register_sound_midi(&cs_midi_fops, -1)) < 0)
+ if ((card->dev_midi = register_sound_midi(&cs_midi_fops, -1)) < 0)
printk(KERN_ERR "cs46xx: unable to register midi\n");
card->pm.flags |= CS46XX_PM_IDLE;
- for(i=0;i<5;i++)
- {
- if (cs_hardware_init(card) != 0)
- {
+ for (i = 0; i < 5; i++) {
+ if (cs_hardware_init(card) != 0) {
CS_DBGOUT(CS_ERROR, 4, printk(
"cs46xx: ERROR in cs_hardware_init()... retrying\n"));
for (j = 0; j < NR_AC97; j++)
@@ -5497,12 +5206,11 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
}
break;
}
- if(i>=4)
- {
+ if (i >= 4) {
CS_DBGOUT(CS_PM | CS_ERROR, 1, printk(
"cs46xx: cs46xx_probe()- cs_hardware_init() failed, retried %d times.\n",i));
unregister_sound_dsp(card->dev_audio);
- if(card->dev_midi)
+ if (card->dev_midi)
unregister_sound_midi(card->dev_midi);
goto fail;
}
@@ -5518,7 +5226,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
* Check if we have to init the amplifier, but probably already done
* since the CD logic in the ac97 init code will turn on the ext amp.
*/
- if(cp->amp_init)
+ if (cp->amp_init)
cp->amp_init(card);
card->active_ctrl(card, -1);
@@ -5536,15 +5244,15 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
fail:
free_irq(card->irq, card);
fail2:
- if(card->ba0)
+ if (card->ba0)
iounmap(card->ba0);
- if(card->ba1.name.data0)
+ if (card->ba1.name.data0)
iounmap(card->ba1.name.data0);
- if(card->ba1.name.data1)
+ if (card->ba1.name.data1)
iounmap(card->ba1.name.data1);
- if(card->ba1.name.pmem)
+ if (card->ba1.name.pmem)
iounmap(card->ba1.name.pmem);
- if(card->ba1.name.reg)
+ if (card->ba1.name.reg)
iounmap(card->ba1.name.reg);
kfree(card);
CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_INFO
@@ -5598,9 +5306,8 @@ static void __devexit cs46xx_remove(struct pci_dev *pci_dev)
* Power down the DAC and ADC. We will power them up (if) when we need
* them.
*/
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON, CS_TRUE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON, CS_TRUE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
"cs46xx: cs461x_powerdown() failure (0x%x)\n",tmp) );
}
@@ -5634,7 +5341,7 @@ static void __devexit cs46xx_remove(struct pci_dev *pci_dev)
ac97_release_codec(card->ac97_codec[i]);
}
unregister_sound_dsp(card->dev_audio);
- if(card->dev_midi)
+ if (card->dev_midi)
unregister_sound_midi(card->dev_midi);
list_del(&card->list);
kfree(card);
@@ -5693,8 +5400,7 @@ static int __init cs46xx_init_module(void)
"cs46xx: cs46xx_init_module()+ \n"));
rtn = pci_register_driver(&cs46xx_pci_driver);
- if(rtn == -ENODEV)
- {
+ if (rtn == -ENODEV) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(
"cs46xx: Unable to detect valid cs46xx device\n"));
}