-rw-r--r--Documentation/00-INDEX6
-rw-r--r--Documentation/DocBook/dvb/dvbproperty.xml5
-rw-r--r--Documentation/DocBook/media-entities.tmpl7
-rw-r--r--Documentation/DocBook/mtdnand.tmpl3
-rw-r--r--Documentation/DocBook/v4l/media-controller.xml6
-rw-r--r--Documentation/DocBook/v4l/pixfmt.xml1
-rw-r--r--Documentation/DocBook/v4l/subdev-formats.xml10
-rw-r--r--Documentation/RCU/trace.txt17
-rw-r--r--Documentation/arm/Booting33
-rw-r--r--Documentation/arm/Samsung/Overview.txt2
-rw-r--r--Documentation/devicetree/booting-without-of.txt48
-rw-r--r--Documentation/dmaengine.txt97
-rw-r--r--Documentation/filesystems/Locking4
-rw-r--r--Documentation/filesystems/nfs/idmapper.txt4
-rw-r--r--Documentation/filesystems/vfs.txt2
-rw-r--r--Documentation/lockstat.txt36
-rw-r--r--Documentation/networking/dns_resolver.txt4
-rw-r--r--Documentation/power/regulator/machine.txt4
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas14
-rw-r--r--Documentation/security/00-INDEX18
-rw-r--r--Documentation/security/SELinux.txt (renamed from Documentation/SELinux.txt)0
-rw-r--r--Documentation/security/Smack.txt (renamed from Documentation/Smack.txt)0
-rw-r--r--Documentation/security/apparmor.txt (renamed from Documentation/apparmor.txt)0
-rw-r--r--Documentation/security/credentials.txt (renamed from Documentation/credentials.txt)2
-rw-r--r--Documentation/security/keys-request-key.txt (renamed from Documentation/keys-request-key.txt)4
-rw-r--r--Documentation/security/keys-trusted-encrypted.txt (renamed from Documentation/keys-trusted-encrypted.txt)0
-rw-r--r--Documentation/security/keys.txt (renamed from Documentation/keys.txt)4
-rw-r--r--Documentation/security/tomoyo.txt (renamed from Documentation/tomoyo.txt)0
-rw-r--r--MAINTAINERS37
-rw-r--r--arch/alpha/include/asm/unistd.h3
-rw-r--r--arch/alpha/kernel/systbls.S1
-rw-r--r--arch/arm/Kconfig29
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/common/Kconfig2
-rw-r--r--arch/arm/configs/at572d940hfek_defconfig358
-rw-r--r--arch/arm/configs/at91sam9261_defconfig (renamed from arch/arm/configs/at91sam9261ek_defconfig)85
-rw-r--r--arch/arm/configs/at91sam9263_defconfig (renamed from arch/arm/configs/at91sam9263ek_defconfig)84
-rw-r--r--arch/arm/configs/exynos4_defconfig2
-rw-r--r--arch/arm/configs/neocore926_defconfig104
-rw-r--r--arch/arm/configs/s5p6442_defconfig65
-rw-r--r--arch/arm/configs/usb-a9263_defconfig106
-rw-r--r--arch/arm/include/asm/fiq.h23
-rw-r--r--arch/arm/include/asm/mach/arch.h9
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/prom.h37
-rw-r--r--arch/arm/include/asm/setup.h4
-rw-r--r--arch/arm/include/asm/smp.h1
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/kernel/Makefile3
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/devtree.c145
-rw-r--r--arch/arm/kernel/fiq.c45
-rw-r--r--arch/arm/kernel/fiqasm.S49
-rw-r--r--arch/arm/kernel/head-common.S24
-rw-r--r--arch/arm/kernel/head.S15
-rw-r--r--arch/arm/kernel/setup.c90
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/lib/lib1funcs.S25
-rw-r--r--arch/arm/mach-at91/Kconfig40
-rw-r--r--arch/arm/mach-at91/Makefile4
-rw-r--r--arch/arm/mach-at91/at572d940hf.c377
-rw-r--r--arch/arm/mach-at91/at572d940hf_devices.c970
-rw-r--r--arch/arm/mach-at91/at91cap9.c41
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c24
-rw-r--r--arch/arm/mach-at91/at91rm9200.c53
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c24
-rw-r--r--arch/arm/mach-at91/at91sam9260.c48
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c26
-rw-r--r--arch/arm/mach-at91/at91sam9261.c41
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c21
-rw-r--r--arch/arm/mach-at91/at91sam9263.c39
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c20
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c64
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c27
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c40
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c23
-rw-r--r--arch/arm/mach-at91/at91x40.c5
-rw-r--r--arch/arm/mach-at91/board-1arm.c12
-rw-r--r--arch/arm/mach-at91/board-afeb-9260v1.c6
-rw-r--r--arch/arm/mach-at91/board-at572d940hf_ek.c326
-rw-r--r--arch/arm/mach-at91/board-cam60.c6
-rw-r--r--arch/arm/mach-at91/board-cap9adk.c13
-rw-r--r--arch/arm/mach-at91/board-carmeva.c8
-rw-r--r--arch/arm/mach-at91/board-cpu9krea.c6
-rw-r--r--arch/arm/mach-at91/board-cpuat91.c12
-rw-r--r--arch/arm/mach-at91/board-csb337.c8
-rw-r--r--arch/arm/mach-at91/board-csb637.c8
-rw-r--r--arch/arm/mach-at91/board-eb01.c4
-rw-r--r--arch/arm/mach-at91/board-eb9200.c8
-rw-r--r--arch/arm/mach-at91/board-ecbat91.c12
-rw-r--r--arch/arm/mach-at91/board-eco920.c32
-rw-r--r--arch/arm/mach-at91/board-flexibity.c6
-rw-r--r--arch/arm/mach-at91/board-foxg20.c6
-rw-r--r--arch/arm/mach-at91/board-gsia18s.c8
-rw-r--r--arch/arm/mach-at91/board-kafa.c12
-rw-r--r--arch/arm/mach-at91/board-kb9202.c13
-rw-r--r--arch/arm/mach-at91/board-neocore926.c6
-rw-r--r--arch/arm/mach-at91/board-pcontrol-g20.c8
-rw-r--r--arch/arm/mach-at91/board-picotux200.c8
-rw-r--r--arch/arm/mach-at91/board-qil-a9260.c6
-rw-r--r--arch/arm/mach-at91/board-rm9200dk.c8
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c8
-rw-r--r--arch/arm/mach-at91/board-sam9-l9260.c6
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c17
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c6
-rw-r--r--arch/arm/mach-at91/board-snapper9260.c6
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c18
-rw-r--r--arch/arm/mach-at91/board-usb-a9260.c6
-rw-r--r--arch/arm/mach-at91/board-usb-a9263.c6
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c12
-rw-r--r--arch/arm/mach-at91/clock.c69
-rw-r--r--arch/arm/mach-at91/clock.h20
-rw-r--r--arch/arm/mach-at91/generic.h30
-rw-r--r--arch/arm/mach-at91/include/mach/at572d940hf.h123
-rw-r--r--arch/arm/mach-at91/include/mach/at572d940hf_matrix.h123
-rw-r--r--arch/arm/mach-at91/include/mach/at91cap9.h4
-rw-r--r--arch/arm/mach-at91/include/mach/at91rm9200.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9260.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9261.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9263.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45.h4
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9rl.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91x40.h2
-rw-r--r--arch/arm/mach-at91/include/mach/board.h6
-rw-r--r--arch/arm/mach-at91/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-at91/include/mach/cpu.h15
-rw-r--r--arch/arm/mach-at91/include/mach/hardware.h15
-rw-r--r--arch/arm/mach-at91/include/mach/memory.h2
-rw-r--r--arch/arm/mach-at91/include/mach/stamp9g20.h2
-rw-r--r--arch/arm/mach-at91/include/mach/system_rev.h25
-rw-r--r--arch/arm/mach-at91/include/mach/timex.h5
-rw-r--r--arch/arm/mach-davinci/da850.c2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c16
-rw-r--r--arch/arm/mach-davinci/devices.c3
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h3
-rw-r--r--arch/arm/mach-exynos4/Kconfig2
-rw-r--r--arch/arm/mach-exynos4/Makefile3
-rw-r--r--arch/arm/mach-exynos4/cpuidle.c86
-rw-r--r--arch/arm/mach-exynos4/mach-nuri.c89
-rw-r--r--arch/arm/mach-gemini/board-wbd111.c7
-rw-r--r--arch/arm/mach-gemini/board-wbd222.c7
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c4
-rw-r--r--arch/arm/mach-netx/fb.c1
-rw-r--r--arch/arm/mach-nomadik/Kconfig1
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c27
-rw-r--r--arch/arm/mach-pxa/Kconfig1
-rw-r--r--arch/arm/mach-s3c2410/mach-amlm5900.c5
-rw-r--r--arch/arm/mach-s3c2410/mach-tct_hammer.c6
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c20
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h48
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h60
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h53
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h49
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h44
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h71
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h42
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h74
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h40
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h36
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h54
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h70
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h69
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h46
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/pm.c34
-rw-r--r--arch/arm/mach-s3c64xx/setup-i2c0.c7
-rw-r--r--arch/arm/mach-s3c64xx/setup-i2c1.c7
-rw-r--r--arch/arm/mach-s3c64xx/sleep.S8
-rw-r--r--arch/arm/mach-s5p6442/Kconfig25
-rw-r--r--arch/arm/mach-s5p6442/Makefile24
-rw-r--r--arch/arm/mach-s5p6442/Makefile.boot2
-rw-r--r--arch/arm/mach-s5p6442/clock.c420
-rw-r--r--arch/arm/mach-s5p6442/cpu.c143
-rw-r--r--arch/arm/mach-s5p6442/dev-audio.c217
-rw-r--r--arch/arm/mach-s5p6442/dev-spi.c121
-rw-r--r--arch/arm/mach-s5p6442/dma.c105
-rw-r--r--arch/arm/mach-s5p6442/include/mach/debug-macro.S35
-rw-r--r--arch/arm/mach-s5p6442/include/mach/entry-macro.S48
-rw-r--r--arch/arm/mach-s5p6442/include/mach/gpio.h123
-rw-r--r--arch/arm/mach-s5p6442/include/mach/hardware.h18
-rw-r--r--arch/arm/mach-s5p6442/include/mach/io.h17
-rw-r--r--arch/arm/mach-s5p6442/include/mach/irqs.h87
-rw-r--r--arch/arm/mach-s5p6442/include/mach/map.h76
-rw-r--r--arch/arm/mach-s5p6442/include/mach/memory.h19
-rw-r--r--arch/arm/mach-s5p6442/include/mach/pwm-clock.h70
-rw-r--r--arch/arm/mach-s5p6442/include/mach/regs-clock.h104
-rw-r--r--arch/arm/mach-s5p6442/include/mach/regs-irq.h19
-rw-r--r--arch/arm/mach-s5p6442/include/mach/spi-clocks.h17
-rw-r--r--arch/arm/mach-s5p6442/include/mach/system.h23
-rw-r--r--arch/arm/mach-s5p6442/include/mach/tick.h26
-rw-r--r--arch/arm/mach-s5p6442/include/mach/timex.h24
-rw-r--r--arch/arm/mach-s5p6442/include/mach/uncompress.h24
-rw-r--r--arch/arm/mach-s5p6442/include/mach/vmalloc.h17
-rw-r--r--arch/arm/mach-s5p6442/init.c44
-rw-r--r--arch/arm/mach-s5p6442/mach-smdk6442.c102
-rw-r--r--arch/arm/mach-s5p6442/setup-i2c0.c28
-rw-r--r--arch/arm/mach-s5pc100/Makefile2
-rw-r--r--arch/arm/mach-s5pv210/Makefile2
-rw-r--r--arch/arm/mach-u300/Makefile2
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c16
-rw-r--r--arch/arm/mach-ux500/devices-common.h10
-rw-r--r--arch/arm/mach-ux500/devices-db5500.h28
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h34
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h3
-rw-r--r--arch/arm/mm/cache-v6.S1
-rw-r--r--arch/arm/mm/cache-v7.S2
-rw-r--r--arch/arm/mm/context.c17
-rw-r--r--arch/arm/mm/init.c15
-rw-r--r--arch/arm/mm/mm.h7
-rw-r--r--arch/arm/mm/mmu.c9
-rw-r--r--arch/arm/mm/proc-v6.S4
-rw-r--r--arch/arm/mm/proc-v7.S14
-rw-r--r--arch/arm/plat-nomadik/Kconfig5
-rw-r--r--arch/arm/plat-nomadik/Makefile1
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h2
-rw-r--r--arch/arm/plat-omap/Makefile2
-rw-r--r--arch/arm/plat-omap/include/plat/gpio.h103
-rw-r--r--arch/arm/plat-s5p/Kconfig2
-rw-r--r--arch/arm/plat-s5p/cpu.c10
-rw-r--r--arch/arm/plat-s5p/include/plat/s5p6442.h33
-rw-r--r--arch/arm/plat-samsung/Makefile1
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/debug-macro.S2
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h6
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c64xx-spi.h1
-rw-r--r--arch/avr32/include/asm/unistd.h3
-rw-r--r--arch/avr32/kernel/syscall_table.S1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c4
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h1
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h6
-rw-r--r--arch/blackfin/include/asm/gptimers.h18
-rw-r--r--arch/blackfin/include/asm/unistd.h4
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c36
-rw-r--r--arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h79
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF514.h16
-rw-r--r--arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h79
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF525.h4
-rw-r--r--arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h52
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c4
-rw-r--r--arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h79
-rw-r--r--arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h93
-rw-r--r--arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h94
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF547.h19
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c4
-rw-r--r--arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h52
-rw-r--r--arch/blackfin/mach-common/entry.S2
-rw-r--r--arch/blackfin/mm/maccess.c4
-rw-r--r--arch/cris/Kconfig1
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c10
-rw-r--r--arch/cris/arch-v10/kernel/entry.S1
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig1
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c7
-rw-r--r--arch/cris/arch-v32/kernel/entry.S1
-rw-r--r--arch/cris/include/asm/unistd.h3
-rw-r--r--arch/frv/include/asm/suspend.h20
-rw-r--r--arch/frv/include/asm/unistd.h3
-rw-r--r--arch/frv/kernel/entry.S1
-rw-r--r--arch/h8300/include/asm/unistd.h3
-rw-r--r--arch/h8300/kernel/syscalls.S1
-rw-r--r--arch/ia64/include/asm/unistd.h3
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/m32r/include/asm/unistd.h3
-rw-r--r--arch/m32r/kernel/syscall_table.S1
-rw-r--r--arch/m68k/include/asm/unistd.h3
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/microblaze/include/asm/unistd.h3
-rw-r--r--arch/microblaze/kernel/prom.c2
-rw-r--r--arch/microblaze/kernel/syscall_table.S1
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c11
-rw-r--r--arch/mips/include/asm/prom.h3
-rw-r--r--arch/mips/include/asm/suspend.h2
-rw-r--r--arch/mips/include/asm/unistd.h15
-rw-r--r--arch/mips/kernel/prom.c3
-rw-r--r--arch/mips/kernel/scall32-o32.S1
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S1
-rw-r--r--arch/mips/txx9/generic/setup.c3
-rw-r--r--arch/mn10300/include/asm/unistd.h3
-rw-r--r--arch/mn10300/kernel/entry.S1
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h2
-rw-r--r--arch/powerpc/include/asm/rio.h5
-rw-r--r--arch/powerpc/include/asm/suspend.h6
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h3
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/swsusp.c1
-rw-r--r--arch/powerpc/kernel/traps.c13
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c9
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c100
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/include/asm/delay.h8
-rw-r--r--arch/s390/include/asm/irq.h8
-rw-r--r--arch/s390/include/asm/s390_ext.h17
-rw-r--r--arch/s390/include/asm/suspend.h10
-rw-r--r--arch/s390/include/asm/topology.h4
-rw-r--r--arch/s390/include/asm/uaccess.h11
-rw-r--r--arch/s390/include/asm/unistd.h3
-rw-r--r--arch/s390/kernel/Makefile8
-rw-r--r--arch/s390/kernel/compat_wrapper.S6
-rw-r--r--arch/s390/kernel/dis.c2
-rw-r--r--arch/s390/kernel/irq.c137
-rw-r--r--arch/s390/kernel/s390_ext.c108
-rw-r--r--arch/s390/kernel/smp.c1
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/topology.c1
-rw-r--r--arch/s390/kernel/traps.c1
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/lib/delay.c15
-rw-r--r--arch/s390/mm/fault.c62
-rw-r--r--arch/s390/mm/init.c2
-rw-r--r--arch/s390/mm/maccess.c4
-rw-r--r--arch/s390/oprofile/hwsampler.c4
-rw-r--r--arch/sh/include/asm/suspend.h1
-rw-r--r--arch/sh/include/asm/unistd_32.h3
-rw-r--r--arch/sh/include/asm/unistd_64.h3
-rw-r--r--arch/sh/kernel/syscalls_32.S1
-rw-r--r--arch/sh/kernel/syscalls_64.S1
-rw-r--r--arch/sparc/include/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/unicore32/include/asm/suspend.h1
-rw-r--r--arch/x86/ia32/ia32entry.S1
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/desc.h152
-rw-r--r--arch/x86/include/asm/mmu.h4
-rw-r--r--arch/x86/include/asm/suspend_32.h2
-rw-r--r--arch/x86/include/asm/suspend_64.h5
-rw-r--r--arch/x86/include/asm/unistd_32.h3
-rw-r--r--arch/x86/include/asm/unistd_64.h2
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h590
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h71
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1012
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c40
-rw-r--r--arch/x86/kernel/cpu/amd.c7
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/ftrace.c12
-rw-r--r--arch/x86/kernel/setup.c7
-rw-r--r--arch/x86/kernel/syscall_table_32.S1
-rw-r--r--arch/x86/mm/fault.c35
-rw-r--r--arch/x86/oprofile/op_model_amd.c95
-rw-r--r--arch/x86/platform/efi/efi.c45
-rw-r--r--arch/x86/platform/efi/efi_64.c5
-rw-r--r--arch/x86/platform/uv/tlb_uv.c1484
-rw-r--r--arch/x86/platform/uv/uv_time.c16
-rw-r--r--arch/xtensa/include/asm/unistd.h4
-rw-r--r--block/blk-core.c5
-rw-r--r--block/genhd.c2
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/amba/bus.c5
-rw-r--r--drivers/block/brd.c42
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/TODO14
-rw-r--r--drivers/dma/at_hdmac.c376
-rw-r--r--drivers/dma/at_hdmac_regs.h30
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/dw_dmac.c272
-rw-r--r--drivers/dma/dw_dmac_regs.h2
-rw-r--r--drivers/dma/intel_mid_dma.c17
-rw-r--r--drivers/dma/ioat/dma_v2.c8
-rw-r--r--drivers/dma/iop-adma.c6
-rw-r--r--drivers/dma/mv_xor.c6
-rw-r--r--drivers/dma/pch_dma.c96
-rw-r--r--drivers/dma/ppc4xx/adma.c8
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/gpio/Kconfig41
-rw-r--r--drivers/gpio/Makefile8
-rw-r--r--drivers/gpio/gpio-exynos4.c (renamed from arch/arm/mach-exynos4/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-nomadik.c (renamed from arch/arm/plat-nomadik/gpio.c)65
-rw-r--r--drivers/gpio/gpio-omap.c (renamed from arch/arm/plat-omap/gpio.c)105
-rw-r--r--drivers/gpio/gpio-plat-samsung.c (renamed from arch/arm/plat-samsung/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-s5pc100.c (renamed from arch/arm/mach-s5pc100/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-s5pv210.c (renamed from arch/arm/mach-s5pv210/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-u300.c (renamed from arch/arm/mach-u300/gpio.c)0
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpio/langwell_gpio.c65
-rw-r--r--drivers/gpio/pca953x.c249
-rw-r--r--drivers/gpio/pch_gpio.c2
-rw-r--r--drivers/gpio/tps65910-gpio.c100
-rw-r--r--drivers/hwmon/coretemp.c16
-rw-r--r--drivers/hwmon/pmbus_core.c1
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c272
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c107
-rw-r--r--drivers/media/dvb/frontends/stb0899_algo.c2
-rw-r--r--drivers/media/dvb/frontends/tda8261.c1
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/Kconfig12
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/fintek-cir.c684
-rw-r--r--drivers/media/rc/fintek-cir.h243
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c134
-rw-r--r--drivers/media/video/Kconfig6
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/gspca/kinect.c2
-rw-r--r--drivers/media/video/m5mols/Kconfig5
-rw-r--r--drivers/media/video/m5mols/Makefile3
-rw-r--r--drivers/media/video/m5mols/m5mols.h296
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c191
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c299
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c1004
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h399
-rw-r--r--drivers/media/video/uvc/Makefile3
-rw-r--r--drivers/media/video/uvc/uvc_driver.c66
-rw-r--r--drivers/media/video/uvc/uvc_entity.c118
-rw-r--r--drivers/media/video/uvc/uvcvideo.h20
-rw-r--r--drivers/mfd/Kconfig9
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/db8500-prcmu.c3
-rw-r--r--drivers/mfd/tps65910-irq.c218
-rw-r--r--drivers/mfd/tps65910.c229
-rw-r--r--drivers/mfd/tps65911-comparator.c188
-rw-r--r--drivers/mmc/host/mmci.c25
-rw-r--r--drivers/mtd/Kconfig18
-rw-r--r--drivers/mtd/Makefile3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/doc2000.c4
-rw-r--r--drivers/mtd/devices/doc2001.c4
-rw-r--r--drivers/mtd/devices/doc2001plus.c4
-rw-r--r--drivers/mtd/devices/lart.c9
-rw-r--r--drivers/mtd/devices/m25p80.c109
-rw-r--r--drivers/mtd/devices/ms02-nv.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c45
-rw-r--r--drivers/mtd/devices/mtdram.c5
-rw-r--r--drivers/mtd/devices/phram.c4
-rw-r--r--drivers/mtd/devices/pmc551.c6
-rw-r--r--drivers/mtd/devices/slram.c4
-rw-r--r--drivers/mtd/devices/sst25l.c68
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c8
-rw-r--r--drivers/mtd/maps/Kconfig23
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c4
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c6
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c21
-rw-r--r--drivers/mtd/maps/cdb89712.c12
-rw-r--r--drivers/mtd/maps/ceiva.c6
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c4
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/dbox2-flash.c4
-rw-r--r--drivers/mtd/maps/dc21285.c20
-rw-r--r--drivers/mtd/maps/dilnetpc.c9
-rw-r--r--drivers/mtd/maps/dmv182.c4
-rw-r--r--drivers/mtd/maps/edb7312.c26
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/fortunet.c7
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c27
-rw-r--r--drivers/mtd/maps/h720x-flash.c6
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/impa7.c22
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c19
-rw-r--r--drivers/mtd/maps/ixp2000.c4
-rw-r--r--drivers/mtd/maps/ixp4xx.c16
-rw-r--r--drivers/mtd/maps/l440gx.c4
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c45
-rw-r--r--drivers/mtd/maps/mbx860.c6
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c12
-rw-r--r--drivers/mtd/maps/octagon-5066.c4
-rw-r--r--drivers/mtd/maps/pci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c4
-rw-r--r--drivers/mtd/maps/physmap.c34
-rw-r--r--drivers/mtd/maps/physmap_of.c30
-rw-r--r--drivers/mtd/maps/plat-ram.c24
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c6
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c18
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c24
-rw-r--r--drivers/mtd/maps/rpxlite.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c21
-rw-r--r--drivers/mtd/maps/sbc_gxx.c4
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/maps/scb2_flash.c6
-rw-r--r--drivers/mtd/maps/scx200_docflash.c16
-rw-r--r--drivers/mtd/maps/solutionengine.c12
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/maps/tqm8xxl.c20
-rw-r--r--drivers/mtd/maps/ts5500_flash.c4
-rw-r--r--drivers/mtd/maps/tsunami_flash.c4
-rw-r--r--drivers/mtd/maps/uclinux.c12
-rw-r--r--drivers/mtd/maps/vmax301.c4
-rw-r--r--drivers/mtd/maps/vmu-flash.c4
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c15
-rw-r--r--drivers/mtd/mtd_blkdevs.c24
-rw-r--r--drivers/mtd/mtdchar.c55
-rw-r--r--drivers/mtd/mtdconcat.c4
-rw-r--r--drivers/mtd/mtdcore.c167
-rw-r--r--drivers/mtd/mtdcore.h6
-rw-r--r--drivers/mtd/mtdpart.c9
-rw-r--r--drivers/mtd/mtdswap.c8
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/ams-delta.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c13
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/mtd/nand/autcpu12.c16
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c4
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c7
-rw-r--r--drivers/mtd/nand/cafe_nand.c11
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c19
-rw-r--r--drivers/mtd/nand/davinci_nand.c51
-rw-r--r--drivers/mtd/nand/denali.c247
-rw-r--r--drivers/mtd/nand/denali.h373
-rw-r--r--drivers/mtd/nand/diskonchip.c18
-rw-r--r--drivers/mtd/nand/edb7312.c9
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c12
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/fsmc_nand.c25
-rw-r--r--drivers/mtd/nand/gpio.c4
-rw-r--r--drivers/mtd/nand/h1910.c5
-rw-r--r--drivers/mtd/nand/jz4740_nand.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c12
-rw-r--r--drivers/mtd/nand/mxc_nand.c64
-rw-r--r--drivers/mtd/nand/nand_base.c18
-rw-r--r--drivers/mtd/nand/nand_bbt.c27
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/ndfc.c65
-rw-r--r--drivers/mtd/nand/nomadik_nand.c7
-rw-r--r--drivers/mtd/nand/nuc900_nand.c4
-rw-r--r--drivers/mtd/nand/omap2.c32
-rw-r--r--drivers/mtd/nand/orion_nand.c14
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c12
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c15
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c13
-rw-r--r--drivers/mtd/nand/rtc_from4.c3
-rw-r--r--drivers/mtd/nand/s3c2410.c75
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sharpsl.c12
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c16
-rw-r--r--drivers/mtd/nand/spia.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c10
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c14
-rw-r--r--drivers/mtd/onenand/Kconfig1
-rw-r--r--drivers/mtd/onenand/generic.c16
-rw-r--r--drivers/mtd/onenand/omap2.c10
-rw-r--r--drivers/mtd/onenand/onenand_base.c54
-rw-r--r--drivers/mtd/onenand/onenand_sim.c3
-rw-r--r--drivers/mtd/onenand/samsung.c12
-rw-r--r--drivers/mtd/ubi/gluebi.c6
-rw-r--r--drivers/net/bonding/bond_main.c34
-rw-r--r--drivers/net/davinci_emac.c22
-rw-r--r--drivers/net/sfc/mtd.c6
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/oprofile/event_buffer.h2
-rw-r--r--drivers/oprofile/oprof.c2
-rw-r--r--drivers/power/Kconfig9
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/bq27x00_battery.c11
-rw-r--r--drivers/power/ds2760_battery.c6
-rw-r--r--drivers/power/gpio-charger.c15
-rw-r--r--drivers/power/isp1704_charger.c22
-rw-r--r--drivers/power/max8903_charger.c391
-rw-r--r--drivers/power/test_power.c276
-rw-r--r--drivers/power/z2_battery.c20
-rw-r--r--drivers/regulator/Kconfig6
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c93
-rw-r--r--drivers/regulator/max8997.c13
-rw-r--r--drivers/regulator/max8998.c22
-rw-r--r--drivers/regulator/mc13892-regulator.c18
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c1
-rw-r--r--drivers/regulator/tps65023-regulator.c3
-rw-r--r--drivers/regulator/tps6507x-regulator.c3
-rw-r--r--drivers/regulator/tps65910-regulator.c993
-rw-r--r--drivers/regulator/twl-regulator.c564
-rw-r--r--drivers/regulator/wm831x-dcdc.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c12
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/char/sclp.c7
-rw-r--r--drivers/s390/kvm/kvm_virtio.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c2
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h1
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c11
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c26
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c27
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c13
-rw-r--r--drivers/scsi/fcoe/fcoe.c58
-rw-r--r--drivers/scsi/fcoe/fcoe.h10
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c133
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c40
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/libfc/fc_disc.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c16
-rw-r--r--drivers/scsi/libfc/fc_libfc.h1
-rw-r--r--drivers/scsi/libsas/sas_ata.c60
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_phy.c4
-rw-r--r--drivers/scsi/libsas/sas_port.c21
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c14
-rw-r--r--drivers/scsi/lpfc/lpfc.h43
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c312
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2111
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h87
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h501
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c545
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1659
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c93
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c83
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c102
-rw-r--r--drivers/scsi/osst.c6
-rw-r--r--drivers/scsi/qla4xxx/Makefile2
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c69
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h11
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h23
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c22
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c77
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c19
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c68
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_error.c87
-rw-r--r--drivers/scsi/scsi_proc.c5
-rw-r--r--drivers/scsi/scsi_trace.c4
-rw-r--r--drivers/scsi/sd.c82
-rw-r--r--drivers/scsi/ultrastor.c2
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi_bfin_sport.c952
-rw-r--r--drivers/spi/tle62x0.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c25
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c29
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_tmr.c7
-rw-r--r--drivers/target/target_core_transport.c68
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c8
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/usb/host/ehci-pci.c39
-rw-r--r--drivers/usb/host/pci-quirks.c63
-rw-r--r--drivers/usb/host/pci-quirks.h2
-rw-r--r--drivers/usb/host/xhci-pci.c26
-rw-r--r--drivers/usb/host/xhci-ring.c89
-rw-r--r--drivers/usb/host/xhci.c240
-rw-r--r--drivers/usb/host/xhci.h22
-rw-r--r--drivers/usb/otg/twl6030-usb.c10
-rw-r--r--fs/9p/vfs_inode.c4
-rw-r--r--fs/affs/namei.c5
-rw-r--r--fs/afs/dir.c5
-rw-r--r--fs/attr.c7
-rw-r--r--fs/bfs/dir.c3
-rw-r--r--fs/bio.c16
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/acl.c2
-rw-r--r--fs/btrfs/btrfs_inode.h15
-rw-r--r--fs/btrfs/compression.c47
-rw-r--r--fs/btrfs/compression.h2
-rw-r--r--fs/btrfs/ctree.c51
-rw-r--r--fs/btrfs/ctree.h246
-rw-r--r--fs/btrfs/delayed-inode.c1695
-rw-r--r--fs/btrfs/delayed-inode.h141
-rw-r--r--fs/btrfs/delayed-ref.c114
-rw-r--r--fs/btrfs/delayed-ref.h6
-rw-r--r--fs/btrfs/dir-item.c39
-rw-r--r--fs/btrfs/disk-io.c210
-rw-r--r--fs/btrfs/disk-io.h19
-rw-r--r--fs/btrfs/export.c25
-rw-r--r--fs/btrfs/extent-tree.c1788
-rw-r--r--fs/btrfs/extent_io.c324
-rw-r--r--fs/btrfs/extent_io.h40
-rw-r--r--fs/btrfs/extent_map.c8
-rw-r--r--fs/btrfs/extent_map.h4
-rw-r--r--fs/btrfs/file-item.c38
-rw-r--r--fs/btrfs/file.c302
-rw-r--r--fs/btrfs/free-space-cache.c993
-rw-r--r--fs/btrfs/free-space-cache.h48
-rw-r--r--fs/btrfs/inode-item.c2
-rw-r--r--fs/btrfs/inode-map.c444
-rw-r--r--fs/btrfs/inode-map.h13
-rw-r--r--fs/btrfs/inode.c702
-rw-r--r--fs/btrfs/ioctl.c624
-rw-r--r--fs/btrfs/ioctl.h107
-rw-r--r--fs/btrfs/locking.c25
-rw-r--r--fs/btrfs/locking.h2
-rw-r--r--fs/btrfs/ref-cache.c164
-rw-r--r--fs/btrfs/ref-cache.h24
-rw-r--r--fs/btrfs/relocation.c67
-rw-r--r--fs/btrfs/root-tree.c61
-rw-r--r--fs/btrfs/scrub.c1369
-rw-r--r--fs/btrfs/super.c51
-rw-r--r--fs/btrfs/sysfs.c77
-rw-r--r--fs/btrfs/transaction.c196
-rw-r--r--fs/btrfs/transaction.h5
-rw-r--r--fs/btrfs/tree-defrag.c2
-rw-r--r--fs/btrfs/tree-log.c208
-rw-r--r--fs/btrfs/tree-log.h1
-rw-r--r--fs/btrfs/version.sh43
-rw-r--r--fs/btrfs/volumes.c657
-rw-r--r--fs/btrfs/volumes.h27
-rw-r--r--fs/btrfs/xattr.c12
-rw-r--r--fs/buffer.c1
-rw-r--r--fs/cifs/Kconfig20
-rw-r--r--fs/cifs/README3
-rw-r--r--fs/cifs/cache.c6
-rw-r--r--fs/cifs/cifs_debug.c26
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifs_fs_sb.h3
-rw-r--r--fs/cifs/cifs_spnego.c2
-rw-r--r--fs/cifs/cifs_spnego.h2
-rw-r--r--fs/cifs/cifsacl.c9
-rw-r--r--fs/cifs/cifsencrypt.c14
-rw-r--r--fs/cifs/cifsfs.c233
-rw-r--r--fs/cifs/cifsglob.h129
-rw-r--r--fs/cifs/cifsproto.h209
-rw-r--r--fs/cifs/cifssmb.c463
-rw-r--r--fs/cifs/connect.c625
-rw-r--r--fs/cifs/dir.c33
-rw-r--r--fs/cifs/file.c376
-rw-r--r--fs/cifs/fscache.c6
-rw-r--r--fs/cifs/fscache.h8
-rw-r--r--fs/cifs/inode.c92
-rw-r--r--fs/cifs/ioctl.c2
-rw-r--r--fs/cifs/link.c46
-rw-r--r--fs/cifs/misc.c32
-rw-r--r--fs/cifs/netmisc.c2
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cifs/sess.c42
-rw-r--r--fs/cifs/transport.c214
-rw-r--r--fs/cifs/xattr.c8
-rw-r--r--fs/coda/dir.c5
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/ecryptfs/inode.c7
-rw-r--r--fs/ecryptfs/keystore.c46
-rw-r--r--fs/ext3/inode.c2
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/fat/namei_msdos.c5
-rw-r--r--fs/fat/namei_vfat.c5
-rw-r--r--fs/fs-writeback.c5
-rw-r--r--fs/fuse/dir.c5
-rw-r--r--fs/hfs/dir.c6
-rw-r--r--fs/hfsplus/dir.c8
-rw-r--r--fs/hostfs/hostfs_kern.c5
-rw-r--r--fs/hpfs/namei.c5
-rw-r--r--fs/inode.c54
-rw-r--r--fs/jffs2/dir.c9
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/scan.c19
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_inode.h2
-rw-r--r--fs/jfs/namei.c5
-rw-r--r--fs/logfs/dir.c5
-rw-r--r--fs/minix/namei.c5
-rw-r--r--fs/namei.c41
-rw-r--r--fs/ncpfs/dir.c15
-rw-r--r--fs/nilfs2/inode.c2
-rw-r--r--fs/nilfs2/namei.c5
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/ocfs2/move_extents.c41
-rw-r--r--fs/omfs/dir.c11
-rw-r--r--fs/partitions/check.c8
-rw-r--r--fs/reiserfs/namei.c5
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/reiserfs/xattr.c1
-rw-r--r--fs/sysv/namei.c5
-rw-r--r--fs/ubifs/dir.c5
-rw-r--r--fs/ubifs/super.c2
-rw-r--r--fs/udf/namei.c5
-rw-r--r--fs/ufs/namei.c5
-rw-r--r--fs/xattr.c23
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c3
-rw-r--r--include/asm-generic/bug.h40
-rw-r--r--include/asm-generic/gpio.h10
-rw-r--r--include/asm-generic/unistd.h4
-rw-r--r--include/linux/atomic.h13
-rw-r--r--include/linux/cpuset.h2
-rw-r--r--include/linux/cred.h2
-rw-r--r--include/linux/dw_dmac.h1
-rw-r--r--include/linux/efi.h1
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/fs.h15
-rw-r--r--include/linux/ftrace_event.h12
-rw-r--r--include/linux/gpio.h8
-rw-r--r--include/linux/if_ether.h4
-rw-r--r--include/linux/key.h2
-rw-r--r--include/linux/mfd/max8997-private.h4
-rw-r--r--include/linux/mfd/tps65910.h800
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/mtd/mtd.h17
-rw-r--r--include/linux/mtd/nand.h4
-rw-r--r--include/linux/mtd/partitions.h16
-rw-r--r--include/linux/mtd/physmap.h4
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netfilter.h1
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h4
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h18
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/power/isp1704_charger.h (renamed from arch/arm/mach-s5p6442/include/mach/dma.h)19
-rw-r--r--include/linux/power/max8903_charger.h57
-rw-r--r--include/linux/ratelimit.h40
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/uaccess.h8
-rw-r--r--include/media/m5mols.h35
-rw-r--r--include/media/videobuf-dvb.h4
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/net_ratelimit.h8
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/scsi_tcq.h1
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric_ops.h1
-rw-r--r--include/target/target_core_transport.h1
-rw-r--r--include/trace/events/btrfs.h4
-rw-r--r--include/trace/ftrace.h13
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/cred.c2
-rw-r--r--kernel/events/core.c8
-rw-r--r--kernel/jump_label.c18
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/pm_qos_params.c33
-rw-r--r--kernel/power/hibernate.c220
-rw-r--r--kernel/rcutree.c164
-rw-r--r--kernel/rcutree.h30
-rw-r--r--kernel/rcutree_plugin.h24
-rw-r--r--kernel/rcutree_trace.c12
-rw-r--r--kernel/sched.c56
-rw-r--r--kernel/sched_fair.c5
-rw-r--r--kernel/sched_rt.c10
-rw-r--r--kernel/sched_stats.h4
-rw-r--r--kernel/trace/ftrace.c31
-rw-r--r--kernel/trace/ring_buffer.c10
-rw-r--r--kernel/trace/trace.h15
-rw-r--r--kernel/trace/trace_events.c7
-rw-r--r--kernel/trace/trace_output.c27
-rw-r--r--kernel/watchdog.c9
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--mm/filemap.c18
-rw-r--r--mm/maccess.c8
-rw-r--r--mm/rmap.c17
-rw-r--r--mm/shmem.c2
-rw-r--r--net/8021q/vlan.c5
-rw-r--r--net/atm/atm_sysfs.c10
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/can/proc.c7
-rw-r--r--net/core/ethtool.c25
-rw-r--r--net/core/filter.c1
-rw-r--r--net/core/sysctl_net_core.c1
-rw-r--r--net/core/utils.c1
-rw-r--r--net/ipv4/inetpeer.c42
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c27
-rw-r--r--scripts/recordmcount.h8
-rw-r--r--scripts/selinux/README2
-rwxr-xr-xscripts/tags.sh6
-rw-r--r--security/apparmor/match.c2
-rw-r--r--security/apparmor/policy_unpack.c4
-rw-r--r--security/keys/encrypted.c2
-rw-r--r--security/keys/request_key.c2
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/trusted.c2
-rw-r--r--sound/core/control.c3
-rw-r--r--sound/core/init.c3
-rw-r--r--sound/core/oss/linear.c3
-rw-r--r--sound/core/pcm_lib.c17
-rw-r--r--sound/core/pcm_native.c21
-rw-r--r--sound/core/seq/seq_queue.c2
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_eld.c21
-rw-r--r--sound/pci/hda/hda_intel.c206
-rw-r--r--sound/pci/hda/patch_analog.c1
-rw-r--r--sound/pci/hda/patch_conexant.c42
-rw-r--r--sound/pci/hda/patch_hdmi.c123
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c2
-rw-r--r--sound/soc/codecs/wm1250-ev1.c2
-rw-r--r--sound/soc/codecs/wm8731.c2
-rw-r--r--sound/soc/codecs/wm8915.c1
-rw-r--r--sound/soc/pxa/raumfeld.c92
-rw-r--r--sound/soc/samsung/Kconfig4
-rw-r--r--sound/soc/samsung/smdk_wm8580.c2
-rw-r--r--sound/soc/soc-core.c8
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/usb/card.c17
-rw-r--r--sound/usb/mixer.c32
-rw-r--r--sound/usb/mixer.h14
-rw-r--r--sound/usb/mixer_quirks.c70
-rw-r--r--sound/usb/quirks-table.h4
-rw-r--r--sound/usb/quirks.c18
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-record.c10
-rw-r--r--tools/perf/builtin-report.c23
-rw-r--r--tools/perf/builtin-script.c1
-rw-r--r--tools/perf/builtin-top.c37
-rw-r--r--tools/perf/util/event.c15
-rw-r--r--tools/perf/util/evsel.c10
-rw-r--r--tools/perf/util/header.c8
-rw-r--r--tools/perf/util/include/linux/const.h1
-rw-r--r--tools/perf/util/symbol.c48
-rw-r--r--tools/perf/util/symbol.h3
931 files changed, 31275 insertions, 18723 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1b777b960492..1f89424c36a6 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -192,10 +192,6 @@ kernel-docs.txt
- listing of various WWW + books that document kernel internals.
kernel-parameters.txt
- summary listing of command line / boot prompt args for the kernel.
-keys-request-key.txt
- - description of the kernel key request service.
-keys.txt
- - description of the kernel key retention service.
kobject.txt
- info of the kobject infrastructure of the Linux kernel.
kprobes.txt
@@ -294,6 +290,8 @@ scheduler/
- directory with info on the scheduler.
scsi/
- directory with info on Linux scsi support.
+security/
+ - directory that contains security-related info
serial/
- directory with info on the low level serial API.
serial-console.txt
diff --git a/Documentation/DocBook/dvb/dvbproperty.xml b/Documentation/DocBook/dvb/dvbproperty.xml
index 52d5e3c7cf6c..b5365f61d69b 100644
--- a/Documentation/DocBook/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/dvb/dvbproperty.xml
@@ -141,13 +141,15 @@ struct dtv_properties {
</row></tbody></tgroup></informaltable>
</section>
+<section>
+ <title>Property types</title>
<para>
On <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY</link>/<link linkend="FE_SET_PROPERTY">FE_SET_PROPERTY</link>,
the actual action is determined by the dtv_property cmd/data pairs. With one single ioctl, is possible to
get/set up to 64 properties. The actual meaning of each property is described on the next sections.
</para>
-<para>The Available frontend property types are:</para>
+<para>The available frontend property types are:</para>
<programlisting>
#define DTV_UNDEFINED 0
#define DTV_TUNE 1
@@ -193,6 +195,7 @@ get/set up to 64 properties. The actual meaning of each property is described on
#define DTV_ISDBT_LAYER_ENABLED 41
#define DTV_ISDBS_TS_ID 42
</programlisting>
+</section>
<section id="fe_property_common">
<title>Parameters that are common to all Digital TV standards</title>
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index c8abb23ef1e7..e5fe09430fd9 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -293,6 +293,7 @@
<!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
<!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY sub-srggb12 SYSTEM "v4l/pixfmt-srggb12.xml">
<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
<!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml">
@@ -373,9 +374,9 @@
<!ENTITY sub-media-indices SYSTEM "media-indices.tmpl">
<!ENTITY sub-media-controller SYSTEM "v4l/media-controller.xml">
-<!ENTITY sub-media-open SYSTEM "v4l/media-func-open.xml">
-<!ENTITY sub-media-close SYSTEM "v4l/media-func-close.xml">
-<!ENTITY sub-media-ioctl SYSTEM "v4l/media-func-ioctl.xml">
+<!ENTITY sub-media-func-open SYSTEM "v4l/media-func-open.xml">
+<!ENTITY sub-media-func-close SYSTEM "v4l/media-func-close.xml">
+<!ENTITY sub-media-func-ioctl SYSTEM "v4l/media-func-ioctl.xml">
<!ENTITY sub-media-ioc-device-info SYSTEM "v4l/media-ioc-device-info.xml">
<!ENTITY sub-media-ioc-enum-entities SYSTEM "v4l/media-ioc-enum-entities.xml">
<!ENTITY sub-media-ioc-enum-links SYSTEM "v4l/media-ioc-enum-links.xml">
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 6f242d5dee9a..17910e2052ad 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -189,8 +189,7 @@ static void __iomem *baseaddr;
<title>Partition defines</title>
<para>
If you want to divide your device into partitions, then
- enable the configuration switch CONFIG_MTD_PARTITIONS and define
- a partitioning scheme suitable to your board.
+ define a partitioning scheme suitable to your board.
</para>
<programlisting>
#define NUM_PARTITIONS 2
diff --git a/Documentation/DocBook/v4l/media-controller.xml b/Documentation/DocBook/v4l/media-controller.xml
index 2dc25e1d4089..873ac3a621f0 100644
--- a/Documentation/DocBook/v4l/media-controller.xml
+++ b/Documentation/DocBook/v4l/media-controller.xml
@@ -78,9 +78,9 @@
<appendix id="media-user-func">
<title>Function Reference</title>
<!-- Keep this alphabetically sorted. -->
- &sub-media-open;
- &sub-media-close;
- &sub-media-ioctl;
+ &sub-media-func-open;
+ &sub-media-func-close;
+ &sub-media-func-ioctl;
<!-- All ioctls go here. -->
&sub-media-ioc-device-info;
&sub-media-ioc-enum-entities;
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index dbfe3b08435f..deb660207f94 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -673,6 +673,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
&sub-srggb8;
&sub-sbggr16;
&sub-srggb10;
+ &sub-srggb12;
</section>
<section id="yuv-formats">
diff --git a/Documentation/DocBook/v4l/subdev-formats.xml b/Documentation/DocBook/v4l/subdev-formats.xml
index a26b10c07857..8d3409d2c632 100644
--- a/Documentation/DocBook/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/v4l/subdev-formats.xml
@@ -2531,13 +2531,13 @@
<constant>_JPEG</constant> prefix the format code is made of
the following information.
<itemizedlist>
- <listitem>The number of bus samples per entropy encoded byte.</listitem>
- <listitem>The bus width.</listitem>
+ <listitem><para>The number of bus samples per entropy encoded byte.</para></listitem>
+ <listitem><para>The bus width.</para></listitem>
</itemizedlist>
+ </para>
- <para>For instance, for a JPEG baseline process and an 8-bit bus width
- the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
- </para>
+ <para>For instance, for a JPEG baseline process and an 8-bit bus width
+ the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
</para>
<para>The following table lists existing JPEG compressed formats.</para>
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index c078ad48f7a1..8173cec473aa 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -99,18 +99,11 @@ o "qp" indicates that RCU still expects a quiescent state from
o "dt" is the current value of the dyntick counter that is incremented
when entering or leaving dynticks idle state, either by the
- scheduler or by irq. The number after the "/" is the interrupt
- nesting depth when in dyntick-idle state, or one greater than
- the interrupt-nesting depth otherwise.
-
- This field is displayed only for CONFIG_NO_HZ kernels.
-
-o "dn" is the current value of the dyntick counter that is incremented
- when entering or leaving dynticks idle state via NMI. If both
- the "dt" and "dn" values are even, then this CPU is in dynticks
- idle mode and may be ignored by RCU. If either of these two
- counters is odd, then RCU must be alert to the possibility of
- an RCU read-side critical section running on this CPU.
+ scheduler or by irq. This number is even if the CPU is in
+ dyntick idle mode and odd otherwise. The number after the first
+ "/" is the interrupt nesting depth when in dyntick-idle state,
+ or one greater than the interrupt-nesting depth otherwise.
+ The number after the second "/" is the NMI nesting depth.
This field is displayed only for CONFIG_NO_HZ kernels.
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 76850295af8f..4e686a2ed91e 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -65,13 +65,19 @@ looks at the connected hardware is beyond the scope of this document.
The boot loader must ultimately be able to provide a MACH_TYPE_xxx
value to the kernel. (see linux/arch/arm/tools/mach-types).
-
-4. Setup the kernel tagged list
--------------------------------
+4. Setup boot data
+------------------
Existing boot loaders: OPTIONAL, HIGHLY RECOMMENDED
New boot loaders: MANDATORY
+The boot loader must provide either a tagged list or a dtb image for
+passing configuration data to the kernel. The physical address of the
+boot data is passed to the kernel in register r2.
+
+4a. Setup the kernel tagged list
+--------------------------------
+
The boot loader must create and initialise the kernel tagged list.
A valid tagged list starts with ATAG_CORE and ends with ATAG_NONE.
The ATAG_CORE tag may or may not be empty. An empty ATAG_CORE tag
@@ -101,6 +107,24 @@ The tagged list must be placed in a region of memory where neither
the kernel decompressor nor initrd 'bootp' program will overwrite
it. The recommended placement is in the first 16KiB of RAM.
+4b. Setup the device tree
+-------------------------
+
+The boot loader must load a device tree image (dtb) into system ram
+at a 64bit aligned address and initialize it with the boot data. The
+dtb format is documented in Documentation/devicetree/booting-without-of.txt.
+The kernel will look for the dtb magic value of 0xd00dfeed at the dtb
+physical address to determine if a dtb has been passed instead of a
+tagged list.
+
+The boot loader must pass at a minimum the size and location of the
+system memory, and the root filesystem location. The dtb must be
+placed in a region of memory where the kernel decompressor will not
+overwrite it. The recommended placement is in the first 16KiB of RAM
+with the caveat that it may not be located at physical address 0 since
+the kernel interprets a value of 0 in r2 to mean neither a tagged list
+nor a dtb were passed.
+
5. Calling the kernel image
---------------------------
@@ -125,7 +149,8 @@ In either case, the following conditions must be met:
- CPU register settings
r0 = 0,
r1 = machine type number discovered in (3) above.
- r2 = physical address of tagged list in system RAM.
+ r2 = physical address of tagged list in system RAM, or
+ physical address of device tree block (dtb) in system RAM
- CPU mode
All forms of interrupts must be disabled (IRQs and FIQs)
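
[Editorial illustration, not part of this commit: the register convention in the
hunk above (r0 = 0, r1 = machine type, r2 = boot data, interrupts disabled) can
be sketched as a hypothetical boot-loader hand-off. The function and parameter
names below are invented for illustration only.]

/*
 * Hypothetical boot-loader hand-off matching the conditions above:
 * r0 = 0, r1 = machine type number, r2 = physical address of the
 * tagged list or dtb. IRQs/FIQs are assumed already disabled and the
 * MMU and data cache off before this is called.
 */
static void __attribute__((noreturn))
call_kernel(unsigned long machine_type, unsigned long boot_data_phys,
	    unsigned long kernel_entry_phys)
{
	register unsigned long r0 asm("r0") = 0;
	register unsigned long r1 asm("r1") = machine_type;
	register unsigned long r2 asm("r2") = boot_data_phys;

	/* jump to the kernel image entry point; never returns */
	asm volatile("bx %3"
		     :
		     : "r" (r0), "r" (r1), "r" (r2), "r" (kernel_entry_phys)
		     : "memory");
	__builtin_unreachable();
}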
diff --git a/Documentation/arm/Samsung/Overview.txt b/Documentation/arm/Samsung/Overview.txt
index c3094ea51aa7..658abb258cef 100644
--- a/Documentation/arm/Samsung/Overview.txt
+++ b/Documentation/arm/Samsung/Overview.txt
@@ -14,7 +14,6 @@ Introduction
- S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list
- S3C64XX: S3C6400 and S3C6410
- S5P6440
- - S5P6442
- S5PC100
- S5PC110 / S5PV210
@@ -36,7 +35,6 @@ Configuration
unifying all the SoCs into one kernel.
s5p6440_defconfig - S5P6440 specific default configuration
- s5p6442_defconfig - S5P6442 specific default configuration
s5pc100_defconfig - S5PC100 specific default configuration
s5pc110_defconfig - S5PC110 specific default configuration
s5pv210_defconfig - S5PV210 specific default configuration
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 50619a0720a8..7c1329de0596 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -12,8 +12,9 @@ Table of Contents
=================
I - Introduction
- 1) Entry point for arch/powerpc
- 2) Entry point for arch/x86
+ 1) Entry point for arch/arm
+ 2) Entry point for arch/powerpc
+ 3) Entry point for arch/x86
II - The DT block format
1) Header
@@ -148,7 +149,46 @@ upgrades without significantly impacting the kernel code or cluttering
it with special cases.
-1) Entry point for arch/powerpc
+1) Entry point for arch/arm
+---------------------------
+
+ There is one single entry point to the kernel, at the start
+ of the kernel image. That entry point supports two calling
+ conventions. A summary of the interface is described here. A full
+ description of the boot requirements is documented in
+ Documentation/arm/Booting
+
+ a) ATAGS interface. Minimal information is passed from firmware
+ to the kernel with a tagged list of predefined parameters.
+
+ r0 : 0
+
+ r1 : Machine type number
+
+ r2 : Physical address of tagged list in system RAM
+
+ b) Entry with a flattened device-tree block. Firmware loads the
+ physical address of the flattened device tree block (dtb) into r2,
+ r1 is not used, but it is considered good practise to use a valid
+ machine number as described in Documentation/arm/Booting.
+
+ r0 : 0
+
+ r1 : Valid machine type number. When using a device tree,
+ a single machine type number will often be assigned to
+ represent a class or family of SoCs.
+
+ r2 : physical pointer to the device-tree block
+ (defined in chapter II) in RAM. Device tree can be located
+ anywhere in system RAM, but it should be aligned on a 64 bit
+ boundary.
+
+ The kernel will differentiate between ATAGS and device tree booting by
+ reading the memory pointed to by r2 and looking for either the flattened
+ device tree block magic value (0xd00dfeed) or the ATAG_CORE value at
+ offset 0x4 from r2 (0x54410001).
+
+2) Entry point for arch/powerpc
-------------------------------
There is one single entry point to the kernel, at the start
@@ -226,7 +266,7 @@ it with special cases.
cannot support both configurations with Book E and configurations
with classic Powerpc architectures.
-2) Entry point for arch/x86
+3) Entry point for arch/x86
-------------------------------
There is one single 32bit entry point to the kernel at code32_start,
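
[Editorial illustration, not part of this commit: the detection rule described
in the arch/arm entry above — read the memory at r2 and look for the flattened
device tree magic 0xd00dfeed, or ATAG_CORE (0x54410001) at offset 0x4 — can be
sketched as plain C. This is only a sketch of the stated rule, not the kernel's
actual early-boot code.]

#include <stdint.h>
#include <string.h>

#define FDT_MAGIC	0xd00dfeedU	/* stored big-endian at offset 0 of a dtb */
#define ATAG_CORE	0x54410001U	/* tag value at offset 0x4 of a tagged list */

enum boot_data_type { BOOT_DATA_NONE, BOOT_DATA_ATAGS, BOOT_DATA_DTB };

static uint32_t read_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* classify whatever address the boot loader left in r2 */
static enum boot_data_type classify_boot_data(const void *r2)
{
	const uint8_t *p = r2;
	uint32_t second_word;

	if (!p)
		return BOOT_DATA_NONE;		/* r2 == 0: nothing was passed */

	if (read_be32(p) == FDT_MAGIC)
		return BOOT_DATA_DTB;		/* flattened device tree block */

	memcpy(&second_word, p + 4, sizeof(second_word));
	if (second_word == ATAG_CORE)		/* native-endian tag value */
		return BOOT_DATA_ATAGS;

	return BOOT_DATA_NONE;
}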
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 0c1c2f63c0a9..5a0cb1ef6164 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -1 +1,96 @@
-See Documentation/crypto/async-tx-api.txt
+ DMA Engine API Guide
+ ====================
+
+ Vinod Koul <vinod dot koul at intel.com>
+
+NOTE: For DMA Engine usage in async_tx please see:
+ Documentation/crypto/async-tx-api.txt
+
+
+Below is a guide to device driver writers on how to use the Slave-DMA API of the
+DMA Engine. This is applicable only for slave DMA usage only.
+
+The slave DMA usage consists of following steps
+1. Allocate a DMA slave channel
+2. Set slave and controller specific parameters
+3. Get a descriptor for transaction
+4. Submit the transaction and wait for callback notification
+
+1. Allocate a DMA slave channel
+Channel allocation is slightly different in the slave DMA context, client
+drivers typically need a channel from a particular DMA controller only and even
+in some cases a specific channel is desired. To request a channel
+dma_request_channel() API is used.
+
+Interface:
+struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
+ dma_filter_fn filter_fn,
+ void *filter_param);
+where dma_filter_fn is defined as:
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+When the optional 'filter_fn' parameter is set to NULL dma_request_channel
+simply returns the first channel that satisfies the capability mask. Otherwise,
+when the mask parameter is insufficient for specifying the necessary channel,
+the filter_fn routine can be used to disposition the available channels in the
+system. The filter_fn routine is called once for each free channel in the
+system. Upon seeing a suitable channel filter_fn returns DMA_ACK which flags
+that channel to be the return value from dma_request_channel. A channel
+allocated via this interface is exclusive to the caller, until
+dma_release_channel() is called.
+
+2. Set slave and controller specific parameters
+Next step is always to pass some specific information to the DMA driver. Most of
+the generic information which a slave DMA can use is in struct dma_slave_config.
+It allows the clients to specify DMA direction, DMA addresses, bus widths, DMA
+burst lengths etc. If some DMA controllers have more parameters to be sent then
+they should try to embed struct dma_slave_config in their controller specific
+structure. That gives flexibility to client to pass more parameters, if
+required.
+
+Interface:
+int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+
+3. Get a descriptor for transaction
+For slave usage the various modes of slave transfers supported by the
+DMA-engine are:
+slave_sg - DMA a list of scatter gather buffers from/to a peripheral
+dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
+ operation is explicitly stopped.
+The non NULL return of this transfer API represents a "descriptor" for the given
+transaction.
+
+Interface:
+struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
+struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction);
+
+4. Submit the transaction and wait for callback notification
+To schedule the transaction to be scheduled by dma device, the "descriptor"
+returned in above (3) needs to be submitted.
+To tell the dma driver that a transaction is ready to be serviced, the
+descriptor->submit() callback needs to be invoked. This chains the descriptor to
+the pending queue.
+The transactions in the pending queue can be activated by calling the
+issue_pending API. If channel is idle then the first transaction in queue is
+started and subsequent ones queued up.
+On completion of the DMA operation the next in queue is submitted and a tasklet
+triggered. The tasklet would then call the client driver completion callback
+routine for notification, if set.
+Interface:
+void dma_async_issue_pending(struct dma_chan *chan);
+
+==============================================================================
+
+Additional usage notes for dma driver writers
+1/ Although the DMA engine framework specifies that completion callback
+routines cannot submit any new operations, for slave DMA the subsequent
+transaction may not be available for submission before the callback routine
+has been called, so this restriction does not apply to DMA-slave devices.
+DMA drivers should, however, take care to drop any spin-lock they might be
+holding before calling the callback routine.
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 61b31acb9176..57d827d6071d 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -104,7 +104,7 @@ of the locking scheme for directory operations.
prototypes:
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*dirty_inode) (struct inode *);
+ void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
@@ -126,7 +126,7 @@ locking rules:
s_umount
alloc_inode:
destroy_inode:
-dirty_inode: (must not sleep)
+dirty_inode:
write_inode:
drop_inode: !!!inode->i_lock!!!
evict_inode:
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index b9b4192ea8b5..9c8fd6148656 100644
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -47,8 +47,8 @@ request-key will find the first matching line and corresponding program. In
this case, /some/other/program will handle all uid lookups and
/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
-See <file:Documentation/keys-request-keys.txt> for more information about the
-request-key function.
+See <file:Documentation/security/keys-request-key.txt> for more information
+about the request-key function.
=========
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 21a7dc467bba..88b9f5519af9 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -211,7 +211,7 @@ struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*dirty_inode) (struct inode *);
+ void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, int);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index 9c0a80d17a23..cef00d42ed5b 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -12,8 +12,9 @@ Because things like lock contention can severely impact performance.
- HOW
Lockdep already has hooks in the lock functions and maps lock instances to
-lock classes. We build on that. The graph below shows the relation between
-the lock functions and the various hooks therein.
+lock classes. We build on that (see Documentation/lockdep-design.txt).
+The graph below shows the relation between the lock functions and the various
+hooks therein.
__acquire
|
@@ -128,6 +129,37 @@ points are the points we're contending with.
The integer part of the time values is in us.
+When dealing with nested locks, subclasses may appear:
+
+32...............................................................................................................................................................................................
+33
+34 &rq->lock: 13128 13128 0.43 190.53 103881.26 97454 3453404 0.00 401.11 13224683.11
+35 ---------
+36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+37 &rq->lock 297 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+38 &rq->lock 360 [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
+39 &rq->lock 428 [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
+40 ---------
+41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+42 &rq->lock 174 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+43 &rq->lock 4715 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+44 &rq->lock 893 [<ffffffff81340524>] schedule+0x157/0x7b8
+45
+46...............................................................................................................................................................................................
+47
+48 &rq->lock/1: 11526 11488 0.33 388.73 136294.31 21461 38404 0.00 37.93 109388.53
+49 -----------
+50 &rq->lock/1 11526 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+51 -----------
+52 &rq->lock/1 5645 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+53 &rq->lock/1 1224 [<ffffffff81340524>] schedule+0x157/0x7b8
+54 &rq->lock/1 4336 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+55 &rq->lock/1 181 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+
+Line 48 shows statistics for the second subclass (/1) of &rq->lock class
+(subclass starts from 0), since in this case, as line 50 suggests,
+double_rq_lock actually acquires a nested lock of two spinlocks.
+
View the top contending locks:
# grep : /proc/lock_stat | head
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index 04ca06325b08..7f531ad83285 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -139,8 +139,8 @@ the key will be discarded and recreated when the data it holds has expired.
dns_query() returns a copy of the value attached to the key, or an error if
that is indicated instead.
-See <file:Documentation/keys-request-key.txt> for further information about
-request-key function.
+See <file:Documentation/security/keys-request-key.txt> for further
+information about request-key function.
=========
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index bdec39b9bd75..b42419b52e44 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -53,11 +53,11 @@ static struct regulator_init_data regulator1_data = {
Regulator-1 supplies power to Regulator-2. This relationship must be registered
with the core so that Regulator-1 is also enabled when Consumer A enables its
-supply (Regulator-2). The supply regulator is set by the supply_regulator_dev
+supply (Regulator-2). The supply regulator is set by the supply_regulator
field below:-
static struct regulator_init_data regulator2_data = {
- .supply_regulator_dev = &platform_regulator1_device.dev,
+ .supply_regulator = "regulator_name",
.constraints = {
.min_uV = 1800000,
.max_uV = 2000000,
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 4d9ce73ff730..9ed1d9d96783 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,17 @@
+Release Date : Wed. May 11, 2011 17:00:00 PST 2010 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 00.00.05.38-rc1
+Old Version : 00.00.05.34-rc1
+ 1. Remove MSI-X black list, use MFI_REG_STATE.ready.msiEnable.
+ 2. Remove un-used function megasas_return_cmd_for_smid().
+ 3. Check MFI_REG_STATE.fault.resetAdapter in megasas_reset_fusion().
+ 4. Disable interrupts/free_irq() in megasas_shutdown().
+ 5. Fix bug where AENs could be lost in probe() and resume().
+ 6. Convert 6,10,12 byte CDB's to 16 byte CDB for large LBA's for FastPath
+ IO.
+ 7. Add 1078 OCR support.
+-------------------------------------------------------------------------------
Release Date : Thu. Feb 24, 2011 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
new file mode 100644
index 000000000000..19bc49439cac
--- /dev/null
+++ b/Documentation/security/00-INDEX
@@ -0,0 +1,18 @@
+00-INDEX
+ - this file.
+SELinux.txt
+ - how to get started with the SELinux security enhancement.
+Smack.txt
+ - documentation on the Smack Linux Security Module.
+apparmor.txt
+ - documentation on the AppArmor security extension.
+credentials.txt
+ - documentation about credentials in Linux.
+keys-request-key.txt
+ - description of the kernel key request service.
+keys-trusted-encrypted.txt
+ - info on the Trusted and Encrypted keys in the kernel key ring service.
+keys.txt
+ - description of the kernel key retention service.
+tomoyo.txt
+ - documentation on the TOMOYO Linux Security Module.
diff --git a/Documentation/SELinux.txt b/Documentation/security/SELinux.txt
index 07eae00f3314..07eae00f3314 100644
--- a/Documentation/SELinux.txt
+++ b/Documentation/security/SELinux.txt
diff --git a/Documentation/Smack.txt b/Documentation/security/Smack.txt
index e9dab41c0fe0..e9dab41c0fe0 100644
--- a/Documentation/Smack.txt
+++ b/Documentation/security/Smack.txt
diff --git a/Documentation/apparmor.txt b/Documentation/security/apparmor.txt
index 93c1fd7d0635..93c1fd7d0635 100644
--- a/Documentation/apparmor.txt
+++ b/Documentation/security/apparmor.txt
diff --git a/Documentation/credentials.txt b/Documentation/security/credentials.txt
index 995baf379c07..fc0366cbd7ce 100644
--- a/Documentation/credentials.txt
+++ b/Documentation/security/credentials.txt
@@ -216,7 +216,7 @@ The Linux kernel supports the following types of credentials:
When a process accesses a key, if not already present, it will normally be
cached on one of these keyrings for future accesses to find.
- For more information on using keys, see Documentation/keys.txt.
+ For more information on using keys, see Documentation/security/keys.txt.
(5) LSM
diff --git a/Documentation/keys-request-key.txt b/Documentation/security/keys-request-key.txt
index 69686ad12c66..51987bfecfed 100644
--- a/Documentation/keys-request-key.txt
+++ b/Documentation/security/keys-request-key.txt
@@ -3,8 +3,8 @@
===================
The key request service is part of the key retention service (refer to
-Documentation/keys.txt). This document explains more fully how the requesting
-algorithm works.
+Documentation/security/keys.txt). This document explains more fully how
+the requesting algorithm works.
The process starts by either the kernel requesting a service by calling
request_key*():
diff --git a/Documentation/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt
index 8fb79bc1ac4b..8fb79bc1ac4b 100644
--- a/Documentation/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys-trusted-encrypted.txt
diff --git a/Documentation/keys.txt b/Documentation/security/keys.txt
index 6523a9e6f293..4d75931d2d79 100644
--- a/Documentation/keys.txt
+++ b/Documentation/security/keys.txt
@@ -434,7 +434,7 @@ The main syscalls are:
/sbin/request-key will be invoked in an attempt to obtain a key. The
callout_info string will be passed as an argument to the program.
- See also Documentation/keys-request-key.txt.
+ See also Documentation/security/keys-request-key.txt.
The keyctl syscall functions are:
@@ -864,7 +864,7 @@ payload contents" for more information.
If successful, the key will have been attached to the default keyring for
implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING.
- See also Documentation/keys-request-key.txt.
+ See also Documentation/security/keys-request-key.txt.
(*) To search for a key, passing auxiliary data to the upcaller, call:
diff --git a/Documentation/tomoyo.txt b/Documentation/security/tomoyo.txt
index 200a2d37cbc8..200a2d37cbc8 100644
--- a/Documentation/tomoyo.txt
+++ b/Documentation/security/tomoyo.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index bad19e9d9dcb..29801f760b6f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2174,6 +2174,8 @@ M: Dan Williams <dan.j.williams@intel.com>
S: Supported
F: drivers/dma/
F: include/linux/dma*
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git
+T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
@@ -2300,7 +2302,7 @@ F: net/bridge/netfilter/ebt*.c
ECRYPT FILE SYSTEM
M: Tyler Hicks <tyhicks@linux.vnet.ibm.com>
M: Dustin Kirkland <kirkland@canonical.com>
-L: ecryptfs-devel@lists.launchpad.net
+L: ecryptfs@vger.kernel.org
W: https://launchpad.net/ecryptfs
S: Supported
F: Documentation/filesystems/ecryptfs.txt
@@ -2580,6 +2582,13 @@ S: Maintained
F: drivers/hwmon/f75375s.c
F: include/linux/f75375s.h
+FIREWIRE AUDIO DRIVERS
+M: Clemens Ladisch <clemens@ladisch.de>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+T: git git://git.alsa-project.org/alsa-kernel.git
+S: Maintained
+F: sound/firewire/
+
FIREWIRE SUBSYSTEM
M: Stefan Richter <stefanr@s5r6.in-berlin.de>
L: linux1394-devel@lists.sourceforge.net
@@ -3714,7 +3723,7 @@ KEYS/KEYRINGS:
M: David Howells <dhowells@redhat.com>
L: keyrings@linux-nfs.org
S: Maintained
-F: Documentation/keys.txt
+F: Documentation/security/keys.txt
F: include/linux/key.h
F: include/linux/key-type.h
F: include/keys/
@@ -3726,7 +3735,7 @@ M: Mimi Zohar <zohar@us.ibm.com>
L: linux-security-module@vger.kernel.org
L: keyrings@linux-nfs.org
S: Supported
-F: Documentation/keys-trusted-encrypted.txt
+F: Documentation/security/keys-trusted-encrypted.txt
F: include/keys/trusted-type.h
F: security/keys/trusted.c
F: security/keys/trusted.h
@@ -3737,7 +3746,7 @@ M: David Safford <safford@watson.ibm.com>
L: linux-security-module@vger.kernel.org
L: keyrings@linux-nfs.org
S: Supported
-F: Documentation/keys-trusted-encrypted.txt
+F: Documentation/security/keys-trusted-encrypted.txt
F: include/keys/encrypted-type.h
F: security/keys/encrypted.c
F: security/keys/encrypted.h
@@ -5439,6 +5448,13 @@ L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial
+SYNOPSYS DESIGNWARE DMAC DRIVER
+M: Viresh Kumar <viresh.kumar@st.com>
+S: Maintained
+F: include/linux/dw_dmac.h
+F: drivers/dma/dw_dmac_regs.h
+F: drivers/dma/dw_dmac.c
+
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
@@ -5503,7 +5519,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <James.Bottomley@suse.de>
+M: "James E.J. Bottomley" <JBottomley@parallels.com>
L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
@@ -6072,6 +6088,17 @@ F: Documentation/filesystems/sysv-fs.txt
F: fs/sysv/
F: include/linux/sysv_fs.h
+TARGET SUBSYSTEM
+M: Nicholas A. Bellinger <nab@linux-iscsi.org>
+L: linux-scsi@vger.kernel.org
+L: http://groups.google.com/group/linux-iscsi-target-dev
+W: http://www.linux-iscsi.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
+S: Supported
+F: drivers/target/
+F: include/target/
+F: Documentation/target/
+
TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <balbir@linux.vnet.ibm.com>
S: Maintained
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index b1834166922d..4ac48a095f3a 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -456,10 +456,11 @@
#define __NR_open_by_handle_at 498
#define __NR_clock_adjtime 499
#define __NR_syncfs 500
+#define __NR_setns 501
#ifdef __KERNEL__
-#define NR_SYSCALLS 501
+#define NR_SYSCALLS 502
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 15f999d41c75..b9c28f3f1956 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -519,6 +519,7 @@ sys_call_table:
.quad sys_open_by_handle_at
.quad sys_clock_adjtime
.quad sys_syncfs /* 500 */
+ .quad sys_setns
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7275009686e6..9adc278a22ab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -294,6 +294,8 @@ config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
+ select CLKDEV_LOOKUP
+ select ARM_PATCH_PHYS_VIRT if MMU
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -730,16 +732,6 @@ config ARCH_S5P64X0
Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440,
SMDK6450.
-config ARCH_S5P6442
- bool "Samsung S5P6442"
- select CPU_V6
- select GENERIC_GPIO
- select HAVE_CLK
- select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_WATCHDOG if WATCHDOG
- help
- Samsung S5P6442 CPU based systems
-
config ARCH_S5PC100
bool "Samsung S5PC100"
select GENERIC_GPIO
@@ -991,8 +983,6 @@ endif
source "arch/arm/mach-s5p64x0/Kconfig"
-source "arch/arm/mach-s5p6442/Kconfig"
-
source "arch/arm/mach-s5pc100/Kconfig"
source "arch/arm/mach-s5pv210/Kconfig"
@@ -1399,7 +1389,6 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL
- depends on !ARCH_MSM
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
@@ -1420,7 +1409,7 @@ source kernel/Kconfig.preempt
config HZ
int
default 200 if ARCH_EBSA110 || ARCH_S3C2410 || ARCH_S5P64X0 || \
- ARCH_S5P6442 || ARCH_S5PV210 || ARCH_EXYNOS4
+ ARCH_S5PV210 || ARCH_EXYNOS4
default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
default AT91_TIMER_HZ if ARCH_AT91
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
@@ -1516,6 +1505,9 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE
+config HAVE_ARCH_PFN_VALID
+ def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
+
config HIGHMEM
bool "High Memory Support"
depends on MMU
@@ -1683,6 +1675,13 @@ endmenu
menu "Boot options"
+config USE_OF
+ bool "Flattened Device Tree support"
+ select OF
+ select OF_EARLY_FLATTREE
+ help
+ Include support for flattened device tree machine descriptions.
+
# Compressed boot loader in ROM. Yes, we really want to ask about
# TEXT and BSS so we preserve their values in the config files.
config ZBOOT_ROM_TEXT
@@ -2021,7 +2020,7 @@ menu "Power management options"
source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
- depends on !ARCH_S5P64X0 && !ARCH_S5P6442 && !ARCH_S5PC100
+ depends on !ARCH_S5P64X0 && !ARCH_S5PC100
depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
def_bool y
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 25750bcb3397..f5b2b390c8f2 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -176,7 +176,6 @@ machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2416 s3c2440 s3c24
machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0
machine-$(CONFIG_ARCH_S3C64XX) := s3c64xx
machine-$(CONFIG_ARCH_S5P64X0) := s5p64x0
-machine-$(CONFIG_ARCH_S5P6442) := s5p6442
machine-$(CONFIG_ARCH_S5PC100) := s5pc100
machine-$(CONFIG_ARCH_S5PV210) := s5pv210
machine-$(CONFIG_ARCH_EXYNOS4) := exynos4
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index ea5ee4d067f3..4b71766fb21d 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -7,7 +7,7 @@ config ARM_VIC
config ARM_VIC_NR
int
default 4 if ARCH_S5PV210
- default 3 if ARCH_S5P6442 || ARCH_S5PC100
+ default 3 if ARCH_S5PC100
default 2
depends on ARM_VIC
help
diff --git a/arch/arm/configs/at572d940hfek_defconfig b/arch/arm/configs/at572d940hfek_defconfig
deleted file mode 100644
index 1b1158ae8f82..000000000000
--- a/arch/arm/configs/at572d940hfek_defconfig
+++ /dev/null
@@ -1,358 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOCALVERSION="-AT572D940HF"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT572D940HF=y
-CONFIG_MACH_AT572D940HFEB=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_CMDLINE="mem=48M console=ttyS0 initrd=0x21100000,3145728 root=/dev/ram0 rw ip=172.16.1.181"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_FPE_NWFPE_XP=y
-CONFIG_NET=y
-CONFIG_PACKET=m
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_NET_PKTGEN=m
-CONFIG_NET_TCPPROBE=m
-CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
-CONFIG_CAN_VCAN=m
-CONFIG_CAN_DEBUG_DEVICES=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_CONNECTOR=m
-CONFIG_MTD=m
-CONFIG_MTD_DEBUG=y
-CONFIG_MTD_DEBUG_VERBOSE=1
-CONFIG_MTD_CONCAT=m
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
-CONFIG_MTD_OOPS=m
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-CONFIG_MTD_PLATRAM=m
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTD_BLOCK2MTD=m
-CONFIG_MTD_NAND=m
-CONFIG_MTD_NAND_VERIFY_WRITE=y
-CONFIG_MTD_NAND_DISKONCHIP=m
-CONFIG_MTD_NAND_NANDSIM=m
-CONFIG_MTD_NAND_PLATFORM=m
-CONFIG_MTD_ALAUDA=m
-CONFIG_MTD_UBI=m
-CONFIG_MTD_UBI_GLUEBI=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_ATMEL_TCLIB=y
-CONFIG_ATMEL_SSC=m
-CONFIG_SENSORS_TSL2550=m
-CONFIG_DS1682=m
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
-CONFIG_SCSI_TGT=m
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-CONFIG_MACVLAN=m
-CONFIG_EQUALIZER=m
-CONFIG_TUN=m
-CONFIG_VETH=m
-CONFIG_PHYLIB=y
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-CONFIG_USB_ZD1201=m
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-CONFIG_USB_NET_DM9601=m
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_PLUSB=m
-CONFIG_USB_NET_MCS7830=m
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_KC2190=y
-# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_INPUT_MOUSEDEV=m
-CONFIG_INPUT_EVDEV=m
-CONFIG_INPUT_EVBUG=m
-CONFIG_KEYBOARD_LKKBD=m
-CONFIG_KEYBOARD_GPIO=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_MOUSE_APPLETOUCH=m
-CONFIG_MOUSE_VSXXXAA=m
-CONFIG_MOUSE_GPIO=m
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=m
-CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_RAW=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_N_HDLC=m
-CONFIG_SPECIALIX=m
-CONFIG_STALDRV=y
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=y
-CONFIG_R3964=m
-CONFIG_RAW_DRIVER=m
-CONFIG_TCG_TPM=m
-CONFIG_TCG_NSC=m
-CONFIG_TCG_ATMEL=m
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-CONFIG_SPI_BITBANG=m
-CONFIG_SPI_SPIDEV=m
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-# CONFIG_SND_PCM_OSS_PLUGINS is not set
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_DYNAMIC_MINORS=y
-# CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_SND_DUMMY=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_USB_AUDIO=m
-CONFIG_SND_USB_CAIAQ=m
-CONFIG_SND_USB_CAIAQ_INPUT=y
-CONFIG_HID=m
-CONFIG_HIDRAW=y
-CONFIG_USB_HID=m
-CONFIG_USB_HIDDEV=y
-CONFIG_USB_KBD=m
-CONFIG_USB_MOUSE=m
-CONFIG_HID_A4TECH=m
-CONFIG_HID_APPLE=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_EZKEY=m
-CONFIG_HID_GYRATION=m
-CONFIG_HID_LOGITECH=m
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_PANTHERLORD=m
-CONFIG_HID_PETALYNX=m
-CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
-CONFIG_HID_SUNPLUS=m
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=m
-CONFIG_USB_STORAGE_DATAFAB=m
-CONFIG_USB_STORAGE_FREECOM=m
-CONFIG_USB_STORAGE_ISD200=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_STORAGE_ALAUDA=m
-CONFIG_USB_STORAGE_KARMA=m
-CONFIG_USB_LIBUSUAL=y
-CONFIG_USB_SERIAL=m
-CONFIG_USB_EZUSB=y
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_SPCP8X5=m
-CONFIG_USB_SERIAL_DEBUG=m
-CONFIG_USB_EMI62=m
-CONFIG_USB_EMI26=m
-CONFIG_USB_ADUTUX=m
-CONFIG_USB_TEST=m
-CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_DEBUG_FILES=y
-CONFIG_USB_GADGET_DEBUG_FS=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_ETH=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_MIDI_GADGET=m
-CONFIG_MMC=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_AT91=m
-CONFIG_MMC_SPI=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
-CONFIG_LEDS_GPIO=m
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-CONFIG_RTC_DRV_DS1307=m
-CONFIG_RTC_DRV_DS1305=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD_DEBUG=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_CHECK=y
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_INOTIFY=y
-CONFIG_FUSE_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_CMODE_FAVOURLZO=y
-CONFIG_CRAMFS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-CONFIG_LDM_DEBUG=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_NLS_DEFAULT="cp437"
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=m
diff --git a/arch/arm/configs/at91sam9261ek_defconfig b/arch/arm/configs/at91sam9261_defconfig
index b46025b66b64..ade6b2f23116 100644
--- a/arch/arm/configs/at91sam9261ek_defconfig
+++ b/arch/arm/configs/at91sam9261_defconfig
@@ -1,9 +1,13 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
+CONFIG_NAMESPACES=y
+CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -15,18 +19,27 @@ CONFIG_ARCH_AT91SAM9261=y
CONFIG_MACH_AT91SAM9261EK=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
+CONFIG_AUTO_ZRELADDR=y
+CONFIG_VFP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+CONFIG_CFG80211=y
+CONFIG_LIB80211=y
+CONFIG_MAC80211=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
@@ -34,8 +47,12 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -45,12 +62,27 @@ CONFIG_NET_ETHERNET=y
CONFIG_DM9000=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_USB_ZD1201=m
+CONFIG_RTL8187=m
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
+CONFIG_LIBERTAS_SPI=m
+CONFIG_RT2X00=m
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_ZD1211RW=m
+CONFIG_INPUT_POLLDEV=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
+CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_HW_RANDOM=y
@@ -65,31 +97,62 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_ATMEL_LCDC=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+CONFIG_SND_AT73C213=y
+CONFIG_SND_USB_AUDIO=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
CONFIG_MMC_AT91=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
+CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+CONFIG_FTRACE=y
+CONFIG_CRC_CCITT=m
diff --git a/arch/arm/configs/at91sam9263ek_defconfig b/arch/arm/configs/at91sam9263_defconfig
index 8a04d6f4e065..1cf96264cba1 100644
--- a/arch/arm/configs/at91sam9263ek_defconfig
+++ b/arch/arm/configs/at91sam9263_defconfig
@@ -1,9 +1,13 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
+CONFIG_NAMESPACES=y
+CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -13,53 +17,81 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9263=y
CONFIG_MACH_AT91SAM9263EK=y
+CONFIG_MACH_USB_A9263=y
+CONFIG_MACH_NEOCORE926=y
CONFIG_MTD_AT91_DATAFLASH_CARD=y
# CONFIG_ARM_THUMB is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
+CONFIG_AUTO_ZRELADDR=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_BLOCK2MTD=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_PWM=y
+CONFIG_ATMEL_TCLIB=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+CONFIG_SMSC_PHY=y
+CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_USB_ZD1201=m
+CONFIG_INPUT_POLLDEV=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=y
-# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_HW_RANDOM=y
@@ -74,8 +106,25 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_ATMEL_LCDC=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+CONFIG_SND_ATMEL_AC97C=y
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB_AUDIO=m
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
@@ -83,24 +132,37 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
+CONFIG_SDIO_UART=m
CONFIG_MMC_AT91=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_ATMEL_PWM=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
+CONFIG_FUSE_FS=m
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_FTRACE=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
+CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/exynos4_defconfig b/arch/arm/configs/exynos4_defconfig
index 2ffba24d2e2a..da53ff3b4d70 100644
--- a/arch/arm/configs/exynos4_defconfig
+++ b/arch/arm/configs/exynos4_defconfig
@@ -8,7 +8,9 @@ CONFIG_ARCH_EXYNOS4=y
CONFIG_S3C_LOWLEVEL_UART_PORT=1
CONFIG_MACH_SMDKC210=y
CONFIG_MACH_SMDKV310=y
+CONFIG_MACH_ARMLEX4210=y
CONFIG_MACH_UNIVERSAL_C210=y
+CONFIG_MACH_NURI=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
diff --git a/arch/arm/configs/neocore926_defconfig b/arch/arm/configs/neocore926_defconfig
deleted file mode 100644
index 462dd1850d15..000000000000
--- a/arch/arm/configs/neocore926_defconfig
+++ /dev/null
@@ -1,104 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9263=y
-CONFIG_MACH_NEOCORE926=y
-CONFIG_MTD_AT91_DATAFLASH_CARD=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_NFTL=y
-CONFIG_NFTL_RW=y
-CONFIG_MTD_BLOCK2MTD=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND_VERIFY_WRITE=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_NAND_PLATFORM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_ATMEL_PWM=y
-CONFIG_ATMEL_TCLIB=y
-CONFIG_SCSI=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_SERIAL_ATMEL_PDC is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-CONFIG_FB=y
-CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_ATMEL_LCDC=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_LOGO=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_SDIO_UART=y
-CONFIG_MMC_AT91=m
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/s5p6442_defconfig b/arch/arm/configs/s5p6442_defconfig
deleted file mode 100644
index 0e92a784af66..000000000000
--- a/arch/arm/configs/s5p6442_defconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_S5P6442=y
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
-CONFIG_MACH_SMDK6442=y
-CONFIG_CPU_32v6K=y
-CONFIG_AEABI=y
-CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
-CONFIG_FPE_NWFPE=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_NR_UARTS=3
-CONFIG_SERIAL_SAMSUNG=y
-CONFIG_SERIAL_SAMSUNG_CONSOLE=y
-CONFIG_HW_RANDOM=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_ARM_UNWIND is not set
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
-CONFIG_DEBUG_S3C_UART=1
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/usb-a9263_defconfig b/arch/arm/configs/usb-a9263_defconfig
deleted file mode 100644
index ee82d09249c6..000000000000
--- a/arch/arm/configs/usb-a9263_defconfig
+++ /dev/null
@@ -1,106 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9263=y
-CONFIG_MACH_USB_A9263=y
-CONFIG_AT91_SLOW_CLOCK=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=ttyS0,115200"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_DATAFLASH=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MACB=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_EVBUG=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_FUSE_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index 2242ce22ec6c..d493d0b742a1 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -4,6 +4,13 @@
* Support for FIQ on ARM architectures.
* Written by Philip Blundell <philb@gnu.org>, 1998
* Re-written by Russell King
+ *
+ * NOTE: The FIQ mode registers are not magically preserved across
+ * suspend/resume.
+ *
+ * Drivers which require these registers to be preserved across power
+ * management operations must implement appropriate suspend/resume handlers to
+ * save and restore them.
*/
#ifndef __ASM_FIQ_H
@@ -29,9 +36,21 @@ struct fiq_handler {
extern int claim_fiq(struct fiq_handler *f);
extern void release_fiq(struct fiq_handler *f);
extern void set_fiq_handler(void *start, unsigned int length);
-extern void set_fiq_regs(struct pt_regs *regs);
-extern void get_fiq_regs(struct pt_regs *regs);
extern void enable_fiq(int fiq);
extern void disable_fiq(int fiq);
+/* helpers defined in fiqasm.S: */
+extern void __set_fiq_regs(unsigned long const *regs);
+extern void __get_fiq_regs(unsigned long *regs);
+
+static inline void set_fiq_regs(struct pt_regs const *regs)
+{
+ __set_fiq_regs(&regs->ARM_r8);
+}
+
+static inline void get_fiq_regs(struct pt_regs *regs)
+{
+ __get_fiq_regs(&regs->ARM_r8);
+}
+
#endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index bf13b814c1b8..946f4d778f71 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -18,6 +18,8 @@ struct machine_desc {
unsigned int nr; /* architecture number */
const char *name; /* architecture name */
unsigned long boot_params; /* tagged list */
+ const char **dt_compat; /* array of device tree
+ * 'compatible' strings */
unsigned int nr_irqs; /* number of IRQs */
@@ -48,6 +50,13 @@ struct machine_desc {
extern struct machine_desc *machine_desc;
/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+/*
* Set of macros to define architecture features. This is built into
* a table by the linker.
*/
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index f51a69595f6e..ac75d0848889 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -197,7 +197,7 @@ typedef unsigned long pgprot_t;
typedef struct page *pgtable_t;
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int pfn_valid(unsigned long);
#endif
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
new file mode 100644
index 000000000000..11b8708fc4db
--- /dev/null
+++ b/arch/arm/include/asm/prom.h
@@ -0,0 +1,37 @@
+/*
+ * arch/arm/include/asm/prom.h
+ *
+ * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __ASMARM_PROM_H
+#define __ASMARM_PROM_H
+
+#ifdef CONFIG_OF
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+static inline void irq_dispose_mapping(unsigned int virq)
+{
+ return;
+}
+
+extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern void arm_dt_memblock_reserve(void);
+
+#else /* CONFIG_OF */
+
+static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+{
+ return NULL;
+}
+
+static inline void arm_dt_memblock_reserve(void) { }
+
+#endif /* CONFIG_OF */
+#endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 95176af3df8c..ee2ad8ae07af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -217,6 +217,10 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
+extern int arm_add_memory(phys_addr_t start, unsigned long size);
+extern void early_print(const char *str, ...);
+extern void dump_machine_table(void);
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d2b514fd76f4..e42d96a45d3e 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -70,6 +70,7 @@ extern void platform_smp_prepare_cpus(unsigned int);
*/
struct secondary_data {
unsigned long pgdir;
+ unsigned long swapper_pg_dir;
void *stack;
};
extern struct secondary_data secondary_data;
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 87dbe3e21970..2c04ed5efeb5 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -400,6 +400,8 @@
#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
#define __NR_syncfs (__NR_SYSCALL_BASE+373)
+#define __NR_sendmmsg (__NR_SYSCALL_BASE+374)
+#define __NR_setns (__NR_SYSCALL_BASE+375)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8d95446150a3..a5b31af5c2b8 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_OC_ETM) += etm.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_ARCH_ACORN) += ecard.o
-obj-$(CONFIG_FIQ) += fiq.o
+obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
@@ -44,6 +44,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
+obj-$(CONFIG_OF) += devtree.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 7fbf28c35bb2..80f7896cc016 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -383,6 +383,8 @@
CALL(sys_open_by_handle_at)
CALL(sys_clock_adjtime)
CALL(sys_syncfs)
+ CALL(sys_sendmmsg)
+/* 375 */ CALL(sys_setns)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
new file mode 100644
index 000000000000..a701e4226a6c
--- /dev/null
+++ b/arch/arm/kernel/devtree.c
@@ -0,0 +1,145 @@
+/*
+ * linux/arch/arm/kernel/devtree.c
+ *
+ * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ arm_add_memory(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return alloc_bootmem_align(size, align);
+}
+
+void __init arm_dt_memblock_reserve(void)
+{
+ u64 *reserve_map, base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ /* Reserve the dtb region */
+ memblock_reserve(virt_to_phys(initial_boot_params),
+ be32_to_cpu(initial_boot_params->totalsize));
+
+ /*
+ * Process the reserve map. This will probably overlap the initrd
+ * and dtb locations which are already reserved, but overlapping
+ * doesn't hurt anything
+ */
+ reserve_map = ((void*)initial_boot_params) +
+ be32_to_cpu(initial_boot_params->off_mem_rsvmap);
+ while (1) {
+ base = be64_to_cpup(reserve_map++);
+ size = be64_to_cpup(reserve_map++);
+ if (!size)
+ break;
+ memblock_reserve(base, size);
+ }
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt_phys: physical address of dt blob
+ *
+ * If a dtb was passed to the kernel in r2, then use it to choose the
+ * correct machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+{
+ struct boot_param_header *devtree;
+ struct machine_desc *mdesc, *mdesc_best = NULL;
+ unsigned int score, mdesc_score = ~1;
+ unsigned long dt_root;
+ const char *model;
+
+ devtree = phys_to_virt(dt_phys);
+
+ /* check device tree validity */
+ if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ return NULL;
+
+ /* Search the mdescs for the 'best' compatible value match */
+ initial_boot_params = devtree;
+ dt_root = of_get_flat_dt_root();
+ for_each_machine_desc(mdesc) {
+ score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+ if (score > 0 && score < mdesc_score) {
+ mdesc_best = mdesc;
+ mdesc_score = score;
+ }
+ }
+ if (!mdesc_best) {
+ const char *prop;
+ long size;
+
+ early_print("\nError: unrecognized/unsupported "
+ "device tree compatible list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ while (size > 0) {
+ early_print("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ early_print("]\n\n");
+
+ dump_machine_table(); /* does not return */
+ }
+
+ model = of_get_flat_dt_prop(dt_root, "model", NULL);
+ if (!model)
+ model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+ if (!model)
+ model = "<unknown>";
+ pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ /* Change machine number to match the mdesc we're using */
+ __machine_arch_type = mdesc_best->nr;
+
+ return mdesc_best;
+}
+
+/**
+ * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
+ *
+ * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
+ * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
+ * supported.
+ */
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index e72dc34eea1c..4c164ece5891 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -89,47 +89,6 @@ void set_fiq_handler(void *start, unsigned int length)
flush_icache_range(0x1c, 0x1c + length);
}
-/*
- * Taking an interrupt in FIQ mode is death, so both these functions
- * disable irqs for the duration. Note - these functions are almost
- * entirely coded in assembly.
- */
-void __naked set_fiq_regs(struct pt_regs *regs)
-{
- register unsigned long tmp;
- asm volatile (
- "mov ip, sp\n\
- stmfd sp!, {fp, ip, lr, pc}\n\
- sub fp, ip, #4\n\
- mrs %0, cpsr\n\
- msr cpsr_c, %2 @ select FIQ mode\n\
- mov r0, r0\n\
- ldmia %1, {r8 - r14}\n\
- msr cpsr_c, %0 @ return to SVC mode\n\
- mov r0, r0\n\
- ldmfd sp, {fp, sp, pc}"
- : "=&r" (tmp)
- : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
-}
-
-void __naked get_fiq_regs(struct pt_regs *regs)
-{
- register unsigned long tmp;
- asm volatile (
- "mov ip, sp\n\
- stmfd sp!, {fp, ip, lr, pc}\n\
- sub fp, ip, #4\n\
- mrs %0, cpsr\n\
- msr cpsr_c, %2 @ select FIQ mode\n\
- mov r0, r0\n\
- stmia %1, {r8 - r14}\n\
- msr cpsr_c, %0 @ return to SVC mode\n\
- mov r0, r0\n\
- ldmfd sp, {fp, sp, pc}"
- : "=&r" (tmp)
- : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
-}
-
int claim_fiq(struct fiq_handler *f)
{
int ret = 0;
@@ -174,8 +133,8 @@ void disable_fiq(int fiq)
}
EXPORT_SYMBOL(set_fiq_handler);
-EXPORT_SYMBOL(set_fiq_regs);
-EXPORT_SYMBOL(get_fiq_regs);
+EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */
+EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */
EXPORT_SYMBOL(claim_fiq);
EXPORT_SYMBOL(release_fiq);
EXPORT_SYMBOL(enable_fiq);
diff --git a/arch/arm/kernel/fiqasm.S b/arch/arm/kernel/fiqasm.S
new file mode 100644
index 000000000000..207f9d652010
--- /dev/null
+++ b/arch/arm/kernel/fiqasm.S
@@ -0,0 +1,49 @@
+/*
+ * linux/arch/arm/kernel/fiqasm.S
+ *
+ * Derived from code originally in linux/arch/arm/kernel/fiq.c:
+ *
+ * Copyright (C) 1998 Russell King
+ * Copyright (C) 1998, 1999 Phil Blundell
+ * Copyright (C) 2011, Linaro Limited
+ *
+ * FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
+ *
+ * FIQ support re-written by Russell King to be more generic
+ *
+ * v7/Thumb-2 compatibility modifications by Linaro Limited, 2011.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Taking an interrupt in FIQ mode is death, so both these functions
+ * disable irqs for the duration.
+ */
+
+ENTRY(__set_fiq_regs)
+ mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
+ mrs r1, cpsr
+ msr cpsr_c, r2 @ select FIQ mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ ldmia r0!, {r8 - r12}
+ ldr sp, [r0], #4
+ ldr lr, [r0]
+ msr cpsr_c, r1 @ return to SVC mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ mov pc, lr
+ENDPROC(__set_fiq_regs)
+
+ENTRY(__get_fiq_regs)
+ mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
+ mrs r1, cpsr
+ msr cpsr_c, r2 @ select FIQ mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ stmia r0!, {r8 - r12}
+ str sp, [r0], #4
+ str lr, [r0]
+ msr cpsr_c, r1 @ return to SVC mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ mov pc, lr
+ENDPROC(__get_fiq_regs)
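
A hedged usage sketch of the FIQ API whose register save/restore now lives in fiqasm.S: a driver claims the FIQ, installs its handler, preloads the FIQ-banked registers via set_fiq_regs() (which lands in __set_fiq_regs above) and then enables the FIQ source. The example_* names are hypothetical; claim_fiq(), set_fiq_handler(), set_fiq_regs() and enable_fiq() are the interfaces exported by fiq.c.

#include <linux/string.h>
#include <asm/fiq.h>

/* hypothetical handler stub, copied into the FIQ vector at 0x1c */
extern unsigned char example_fiq_start[], example_fiq_end[];

static struct fiq_handler example_fh = {
	.name = "example-fiq",
};

static int example_fiq_setup(int fiq, unsigned long data)
{
	struct pt_regs regs;
	int ret;

	ret = claim_fiq(&example_fh);		/* take ownership of the FIQ */
	if (ret)
		return ret;

	set_fiq_handler(example_fiq_start,
			example_fiq_end - example_fiq_start);

	/* preload the FIQ-banked registers; r8 carries the handler's data */
	memset(&regs, 0, sizeof(regs));
	regs.ARM_r8 = data;
	set_fiq_regs(&regs);

	enable_fiq(fiq);
	return 0;
}
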
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index c84b57d27d07..854bd22380d3 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -15,6 +15,12 @@
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define OF_DT_MAGIC 0xd00dfeed
+#else
+#define OF_DT_MAGIC 0xedfe0dd0 /* 0xd00dfeed in big-endian */
+#endif
+
/*
* Exception handling. Something went wrong and we can't proceed. We
* ought to tell the user, but since we don't have any guarantee that
@@ -28,20 +34,26 @@
/* Determine validity of the r2 atags pointer. The heuristic requires
* that the pointer be aligned, in the first 16k of physical RAM and
- * that the ATAG_CORE marker is first and present. Future revisions
+ * that the ATAG_CORE marker is first and present. If CONFIG_OF_FLATTREE
+ * is selected, then it will also accept a dtb pointer. Future revisions
* of this function may be more lenient with the physical address and
* may also be able to move the ATAGS block if necessary.
*
* Returns:
- * r2 either valid atags pointer, or zero
+ * r2 either valid atags pointer, valid dtb pointer, or zero
* r5, r6 corrupted
*/
__vet_atags:
tst r2, #0x3 @ aligned?
bne 1f
- ldr r5, [r2, #0] @ is first tag ATAG_CORE?
- cmp r5, #ATAG_CORE_SIZE
+ ldr r5, [r2, #0]
+#ifdef CONFIG_OF_FLATTREE
+ ldr r6, =OF_DT_MAGIC @ is it a DTB?
+ cmp r5, r6
+ beq 2f
+#endif
+ cmp r5, #ATAG_CORE_SIZE @ is first tag ATAG_CORE?
cmpne r5, #ATAG_CORE_SIZE_EMPTY
bne 1f
ldr r5, [r2, #4]
@@ -49,7 +61,7 @@ __vet_atags:
cmp r5, r6
bne 1f
- mov pc, lr @ atag pointer is ok
+2: mov pc, lr @ atag/dtb pointer is ok
1: mov r2, #0
mov pc, lr
@@ -61,7 +73,7 @@ ENDPROC(__vet_atags)
*
* r0 = cp#15 control register
* r1 = machine ID
- * r2 = atags pointer
+ * r2 = atags/dtb pointer
* r9 = processor ID
*/
__INIT
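
For readers less fluent in assembler, a C rendition of the __vet_atags heuristic above; the EXAMPLE_* constants are stand-ins that mirror the ATAG_CORE_SIZE, ATAG_CORE_SIZE_EMPTY and OF_DT_MAGIC values defined in this file, and the function is illustrative only.

#include <linux/types.h>
#include <asm/setup.h>			/* ATAG_CORE */

#define EXAMPLE_OF_DT_MAGIC		0xedfe0dd0	/* 0xd00dfeed as read little-endian */
#define EXAMPLE_ATAG_CORE_SIZE		((2*4 + 3*4) >> 2)
#define EXAMPLE_ATAG_CORE_SIZE_EMPTY	((2*4) >> 2)

static unsigned long example_vet_atags(unsigned long r2)
{
	const u32 *p = (const u32 *)r2;

	if (r2 & 0x3)
		return 0;			/* not word-aligned: reject */

#ifdef CONFIG_OF_FLATTREE
	if (p[0] == EXAMPLE_OF_DT_MAGIC)
		return r2;			/* looks like a dtb */
#endif
	if ((p[0] == EXAMPLE_ATAG_CORE_SIZE ||
	     p[0] == EXAMPLE_ATAG_CORE_SIZE_EMPTY) && p[1] == ATAG_CORE)
		return r2;			/* first tag is ATAG_CORE */

	return 0;				/* reject: caller sees r2 = 0 */
}
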
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c9173cfbbc74..278c1b0ebb2e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -59,7 +59,7 @@
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
- * r1 = machine nr, r2 = atags pointer.
+ * r1 = machine nr, r2 = atags or dtb pointer.
*
* This code is mostly position independent, so if you link the kernel at
* 0xc0008000, you call this at __pa(0xc0008000).
@@ -91,7 +91,7 @@ ENTRY(stext)
#endif
/*
- * r1 = machine no, r2 = atags,
+ * r1 = machine no, r2 = atags or dtb,
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*/
bl __vet_atags
@@ -113,6 +113,7 @@ ENTRY(stext)
ldr r13, =__mmap_switched @ address to jump to after
@ mmu has been enabled
adr lr, BSYM(1f) @ return (PIC) address
+ mov r8, r4 @ set TTBR1 to swapper_pg_dir
ARM( add pc, r10, #PROCINFO_INITFUNC )
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
@@ -302,8 +303,10 @@ ENTRY(secondary_startup)
*/
adr r4, __secondary_data
ldmia r4, {r5, r7, r12} @ address to jump to after
- sub r4, r4, r5 @ mmu has been enabled
- ldr r4, [r7, r4] @ get secondary_data.pgdir
+ sub lr, r4, r5 @ mmu has been enabled
+ ldr r4, [r7, lr] @ get secondary_data.pgdir
+ add r7, r7, #4
+ ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir
adr lr, BSYM(__enable_mmu) @ return address
mov r13, r12 @ __secondary_switched address
ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
@@ -339,7 +342,7 @@ __secondary_data:
*
* r0 = cp#15 control register
* r1 = machine ID
- * r2 = atags pointer
+ * r2 = atags or dtb pointer
* r4 = page table pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
@@ -376,7 +379,7 @@ ENDPROC(__enable_mmu)
*
* r0 = cp#15 control register
* r1 = machine ID
- * r2 = atags pointer
+ * r2 = atags or dtb pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6dce209a623b..ed11fb08b05a 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -20,6 +20,7 @@
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
+#include <linux/of_fdt.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
@@ -42,6 +43,7 @@
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
+#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
@@ -309,7 +311,7 @@ static void __init cacheid_init(void)
*/
extern struct proc_info_list *lookup_processor_type(unsigned int);
-static void __init early_print(const char *str, ...)
+void __init early_print(const char *str, ...)
{
extern void printascii(const char *);
char buf[256];
@@ -439,25 +441,12 @@ void cpu_init(void)
: "r14");
}
-static struct machine_desc * __init setup_machine(unsigned int nr)
+void __init dump_machine_table(void)
{
- extern struct machine_desc __arch_info_begin[], __arch_info_end[];
struct machine_desc *p;
- /*
- * locate machine in the list of supported machines.
- */
- for (p = __arch_info_begin; p < __arch_info_end; p++)
- if (nr == p->nr) {
- printk("Machine: %s\n", p->name);
- return p;
- }
-
- early_print("\n"
- "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
- "Available machine support:\n\nID (hex)\tNAME\n", nr);
-
- for (p = __arch_info_begin; p < __arch_info_end; p++)
+ early_print("Available machine support:\n\nID (hex)\tNAME\n");
+ for_each_machine_desc(p)
early_print("%08x\t%s\n", p->nr, p->name);
early_print("\nPlease check your kernel config and/or bootloader.\n");
@@ -466,7 +455,7 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
/* can't use cpu_relax() here as it may require MMU setup */;
}
-static int __init arm_add_memory(phys_addr_t start, unsigned long size)
+int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
@@ -801,23 +790,29 @@ static void __init squash_mem_tags(struct tag *tag)
tag->hdr.tag = ATAG_NONE;
}
-void __init setup_arch(char **cmdline_p)
+static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
struct tag *tags = (struct tag *)&init_tags;
- struct machine_desc *mdesc;
+ struct machine_desc *mdesc = NULL, *p;
char *from = default_command_line;
init_tags.mem.start = PHYS_OFFSET;
- unwind_init();
-
- setup_processor();
- mdesc = setup_machine(machine_arch_type);
- machine_desc = mdesc;
- machine_name = mdesc->name;
+ /*
+ * locate machine in the list of supported machines.
+ */
+ for_each_machine_desc(p)
+ if (nr == p->nr) {
+ printk("Machine: %s\n", p->name);
+ mdesc = p;
+ break;
+ }
- if (mdesc->soft_reboot)
- reboot_setup("s");
+ if (!mdesc) {
+ early_print("\nError: unrecognized/unsupported machine ID"
+ " (r1 = 0x%08x).\n\n", nr);
+ dump_machine_table(); /* does not return */
+ }
if (__atags_pointer)
tags = phys_to_virt(__atags_pointer);
@@ -849,8 +844,17 @@ void __init setup_arch(char **cmdline_p)
if (tags->hdr.tag != ATAG_CORE)
convert_to_tag_list(tags);
#endif
- if (tags->hdr.tag != ATAG_CORE)
+
+ if (tags->hdr.tag != ATAG_CORE) {
+#if defined(CONFIG_OF)
+ /*
+ * If CONFIG_OF is set, then assume this is a reasonably
+ * modern system that should pass boot parameters
+ */
+ early_print("Warning: Neither atags nor dtb found\n");
+#endif
tags = (struct tag *)&init_tags;
+ }
if (mdesc->fixup)
mdesc->fixup(mdesc, tags, &from, &meminfo);
@@ -862,14 +866,34 @@ void __init setup_arch(char **cmdline_p)
parse_tags(tags);
}
+ /* parse_early_param needs a boot_command_line */
+ strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+ return mdesc;
+}
+
+
+void __init setup_arch(char **cmdline_p)
+{
+ struct machine_desc *mdesc;
+
+ unwind_init();
+
+ setup_processor();
+ mdesc = setup_machine_fdt(__atags_pointer);
+ if (!mdesc)
+ mdesc = setup_machine_tags(machine_arch_type);
+ machine_desc = mdesc;
+ machine_name = mdesc->name;
+
+ if (mdesc->soft_reboot)
+ reboot_setup("s");
+
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
- /* parse_early_param needs a boot_command_line */
- strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
-
/* populate cmd_line too for later use, preserving boot_command_line */
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
@@ -881,6 +905,8 @@ void __init setup_arch(char **cmdline_p)
paging_init(mdesc);
request_standard_resources(mdesc);
+ unflatten_device_tree();
+
#ifdef CONFIG_SMP
if (is_smp())
smp_init_cpus();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d439a8f4c078..344e52b16c8c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -105,6 +105,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
+ secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index 6dc06487f3c3..c562f649734c 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -35,7 +35,7 @@ Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-
+#include <asm/unwind.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
@@ -207,6 +207,7 @@ Boston, MA 02111-1307, USA. */
ENTRY(__udivsi3)
ENTRY(__aeabi_uidiv)
+UNWIND(.fnstart)
subs r2, r1, #1
moveq pc, lr
@@ -230,10 +231,12 @@ ENTRY(__aeabi_uidiv)
mov r0, r0, lsr r2
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
ENTRY(__umodsi3)
+UNWIND(.fnstart)
subs r2, r1, #1 @ compare divisor with 1
bcc Ldiv0
@@ -247,10 +250,12 @@ ENTRY(__umodsi3)
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__umodsi3)
ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
+UNWIND(.fnstart)
cmp r1, #0
eor ip, r0, r1 @ save the sign of the result.
@@ -287,10 +292,12 @@ ENTRY(__aeabi_idiv)
rsbmi r0, r0, #0
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
ENTRY(__modsi3)
+UNWIND(.fnstart)
cmp r1, #0
beq Ldiv0
@@ -310,11 +317,14 @@ ENTRY(__modsi3)
rsbmi r0, r0, #0
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__modsi3)
#ifdef CONFIG_AEABI
ENTRY(__aeabi_uidivmod)
+UNWIND(.fnstart)
+UNWIND(.save {r0, r1, ip, lr} )
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_uidiv
@@ -323,10 +333,12 @@ ENTRY(__aeabi_uidivmod)
sub r1, r1, r3
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
-
+UNWIND(.fnstart)
+UNWIND(.save {r0, r1, ip, lr} )
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_idiv
ldmfd sp!, {r1, r2, ip, lr}
@@ -334,15 +346,18 @@ ENTRY(__aeabi_idivmod)
sub r1, r1, r3
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
#endif
Ldiv0:
-
+UNWIND(.fnstart)
+UNWIND(.pad #4)
+UNWIND(.save {lr})
str lr, [sp, #-8]!
bl __div0
mov r0, #0 @ About as wrong as it could be.
ldr pc, [sp], #8
-
-
+UNWIND(.fnend)
+ENDPROC(Ldiv0)
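
The UNWIND() wrapper used above comes from asm/unwind.h (newly included at the top of this file): when CONFIG_ARM_UNWIND is enabled it emits the annotation so the EABI unwinder can step through these division helpers, and otherwise it compiles away to nothing, roughly:

#ifdef CONFIG_ARM_UNWIND
#define UNWIND(code...)		code
#else
#define UNWIND(code...)
#endif
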
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 2d299bf5d72f..22484670e7ba 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -3,9 +3,6 @@ if ARCH_AT91
config HAVE_AT91_DATAFLASH_CARD
bool
-config HAVE_NAND_ATMEL_BUSWIDTH_16
- bool
-
config HAVE_AT91_USART3
bool
@@ -85,11 +82,6 @@ config ARCH_AT91CAP9
select HAVE_FB_ATMEL
select HAVE_NET_MACB
-config ARCH_AT572D940HF
- bool "AT572D940HF"
- select CPU_ARM926T
- select GENERIC_CLOCKEVENTS
-
config ARCH_AT91X40
bool "AT91x40"
select ARCH_USES_GETTIMEOFFSET
@@ -209,7 +201,6 @@ comment "AT91SAM9260 / AT91SAM9XE Board Type"
config MACH_AT91SAM9260EK
bool "Atmel AT91SAM9260-EK / AT91SAM9XE Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9260-EK or AT91SAM9XE Evaluation Kit
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3933>
@@ -270,7 +261,6 @@ comment "AT91SAM9261 Board Type"
config MACH_AT91SAM9261EK
bool "Atmel AT91SAM9261-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9261-EK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3820>
@@ -286,7 +276,6 @@ comment "AT91SAM9G10 Board Type"
config MACH_AT91SAM9G10EK
bool "Atmel AT91SAM9G10-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9G10-EK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4588>
@@ -302,7 +291,6 @@ comment "AT91SAM9263 Board Type"
config MACH_AT91SAM9263EK
bool "Atmel AT91SAM9263-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9263-EK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4057>
@@ -343,7 +331,6 @@ comment "AT91SAM9G20 Board Type"
config MACH_AT91SAM9G20EK
bool "Atmel AT91SAM9G20-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9G20-EK Evaluation Kit
that embeds only one SD/MMC slot.
@@ -351,7 +338,6 @@ config MACH_AT91SAM9G20EK
config MACH_AT91SAM9G20EK_2MMC
depends on MACH_AT91SAM9G20EK
bool "Atmel AT91SAM9G20-EK Evaluation Kit with 2 SD/MMC Slots"
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit
with 2 SD/MMC Slots. This is the case for AT91SAM9G20-EK rev. C and
@@ -416,7 +402,6 @@ comment "AT91SAM9G45 Board Type"
config MACH_AT91SAM9M10G45EK
bool "Atmel AT91SAM9M10G45-EK Evaluation Kits"
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9G45-EKES Evaluation Kit.
"ES" at the end of the name means that this board is an
@@ -433,7 +418,6 @@ comment "AT91CAP9 Board Type"
config MACH_AT91CAP9ADK
bool "Atmel AT91CAP9A-DK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91CAP9A-DK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4138>
@@ -442,23 +426,6 @@ endif
# ----------------------------------------------------------
-if ARCH_AT572D940HF
-
-comment "AT572D940HF Board Type"
-
-config MACH_AT572D940HFEB
- bool "AT572D940HF-EK"
- depends on ARCH_AT572D940HF
- select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
- help
- Select this if you are using Atmel's AT572D940HF-EK evaluation kit.
- <http://www.atmel.com/products/diopsis/default.asp>
-
-endif
-
-# ----------------------------------------------------------
-
if ARCH_AT91X40
comment "AT91X40 Board Type"
@@ -483,13 +450,6 @@ config MTD_AT91_DATAFLASH_CARD
help
Enable support for the DataFlash card.
-config MTD_NAND_ATMEL_BUSWIDTH_16
- bool "Enable 16-bit data bus interface to NAND flash"
- depends on HAVE_NAND_ATMEL_BUSWIDTH_16
- help
- On AT91SAM926x boards both types of NAND flash can be present
- (8 and 16 bit data bus width).
-
# ----------------------------------------------------------
comment "AT91 Feature Selections"
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index a83835e0c185..96966231920c 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devi
obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
# AT91RM9200 board-specific support
@@ -78,9 +77,6 @@ obj-$(CONFIG_MACH_AT91SAM9M10G45EK) += board-sam9m10g45ek.o
# AT91CAP9 board-specific support
obj-$(CONFIG_MACH_AT91CAP9ADK) += board-cap9adk.o
-# AT572D940HF board-specific support
-obj-$(CONFIG_MACH_AT572D940HFEB) += board-at572d940hf_ek.o
-
# AT91X40 board-specific support
obj-$(CONFIG_MACH_AT91EB01) += board-eb01.o
diff --git a/arch/arm/mach-at91/at572d940hf.c b/arch/arm/mach-at91/at572d940hf.c
deleted file mode 100644
index a6b9c68c003a..000000000000
--- a/arch/arm/mach-at91/at572d940hf.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * arch/arm/mach-at91/at572d940hf.c
- *
- * Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2008 Atmel
- *
- * Copyright (C) 2005 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-
-#include <asm/mach/irq.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <mach/at572d940hf.h>
-#include <mach/at91_pmc.h>
-#include <mach/at91_rstc.h>
-
-#include "generic.h"
-#include "clock.h"
-
-static struct map_desc at572d940hf_io_desc[] __initdata = {
- {
- .virtual = AT91_VA_BASE_SYS,
- .pfn = __phys_to_pfn(AT91_BASE_SYS),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = AT91_IO_VIRT_BASE - AT572D940HF_SRAM_SIZE,
- .pfn = __phys_to_pfn(AT572D940HF_SRAM_BASE),
- .length = AT572D940HF_SRAM_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-/* --------------------------------------------------------------------
- * Clocks
- * -------------------------------------------------------------------- */
-
-/*
- * The peripheral clocks.
- */
-static struct clk pioA_clk = {
- .name = "pioA_clk",
- .pmc_mask = 1 << AT572D940HF_ID_PIOA,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk pioB_clk = {
- .name = "pioB_clk",
- .pmc_mask = 1 << AT572D940HF_ID_PIOB,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk pioC_clk = {
- .name = "pioC_clk",
- .pmc_mask = 1 << AT572D940HF_ID_PIOC,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk macb_clk = {
- .name = "macb_clk",
- .pmc_mask = 1 << AT572D940HF_ID_EMAC,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk usart0_clk = {
- .name = "usart0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_US0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk usart1_clk = {
- .name = "usart1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_US1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk usart2_clk = {
- .name = "usart2_clk",
- .pmc_mask = 1 << AT572D940HF_ID_US2,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk mmc_clk = {
- .name = "mci_clk",
- .pmc_mask = 1 << AT572D940HF_ID_MCI,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk udc_clk = {
- .name = "udc_clk",
- .pmc_mask = 1 << AT572D940HF_ID_UDP,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk twi0_clk = {
- .name = "twi0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TWI0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk spi0_clk = {
- .name = "spi0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SPI0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk spi1_clk = {
- .name = "spi1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SPI1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc0_clk = {
- .name = "ssc0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc1_clk = {
- .name = "ssc1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc2_clk = {
- .name = "ssc2_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC2,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk tc0_clk = {
- .name = "tc0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TC0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk tc1_clk = {
- .name = "tc1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TC1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk tc2_clk = {
- .name = "tc2_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TC2,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ohci_clk = {
- .name = "ohci_clk",
- .pmc_mask = 1 << AT572D940HF_ID_UHP,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc3_clk = {
- .name = "ssc3_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC3,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk twi1_clk = {
- .name = "twi1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TWI1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk can0_clk = {
- .name = "can0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_CAN0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk can1_clk = {
- .name = "can1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_CAN1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk mAgicV_clk = {
- .name = "mAgicV_clk",
- .pmc_mask = 1 << AT572D940HF_ID_MSIRQ0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-
-
-static struct clk *periph_clocks[] __initdata = {
- &pioA_clk,
- &pioB_clk,
- &pioC_clk,
- &macb_clk,
- &usart0_clk,
- &usart1_clk,
- &usart2_clk,
- &mmc_clk,
- &udc_clk,
- &twi0_clk,
- &spi0_clk,
- &spi1_clk,
- &ssc0_clk,
- &ssc1_clk,
- &ssc2_clk,
- &tc0_clk,
- &tc1_clk,
- &tc2_clk,
- &ohci_clk,
- &ssc3_clk,
- &twi1_clk,
- &can0_clk,
- &can1_clk,
- &mAgicV_clk,
- /* irq0 .. irq2 */
-};
-
-/*
- * The five programmable clocks.
- * You must configure pin multiplexing to bring these signals out.
- */
-static struct clk pck0 = {
- .name = "pck0",
- .pmc_mask = AT91_PMC_PCK0,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 0,
-};
-static struct clk pck1 = {
- .name = "pck1",
- .pmc_mask = AT91_PMC_PCK1,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 1,
-};
-static struct clk pck2 = {
- .name = "pck2",
- .pmc_mask = AT91_PMC_PCK2,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 2,
-};
-static struct clk pck3 = {
- .name = "pck3",
- .pmc_mask = AT91_PMC_PCK3,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 3,
-};
-
-static struct clk mAgicV_mem_clk = {
- .name = "mAgicV_mem_clk",
- .pmc_mask = AT91_PMC_PCK4,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 4,
-};
-
-/* HClocks */
-static struct clk hck0 = {
- .name = "hck0",
- .pmc_mask = AT91_PMC_HCK0,
- .type = CLK_TYPE_SYSTEM,
- .id = 0,
-};
-static struct clk hck1 = {
- .name = "hck1",
- .pmc_mask = AT91_PMC_HCK1,
- .type = CLK_TYPE_SYSTEM,
- .id = 1,
-};
-
-static void __init at572d940hf_register_clocks(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
- clk_register(periph_clocks[i]);
-
- clk_register(&pck0);
- clk_register(&pck1);
- clk_register(&pck2);
- clk_register(&pck3);
- clk_register(&mAgicV_mem_clk);
-
- clk_register(&hck0);
- clk_register(&hck1);
-}
-
-/* --------------------------------------------------------------------
- * GPIO
- * -------------------------------------------------------------------- */
-
-static struct at91_gpio_bank at572d940hf_gpio[] = {
- {
- .id = AT572D940HF_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
- }, {
- .id = AT572D940HF_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
- }, {
- .id = AT572D940HF_ID_PIOC,
- .offset = AT91_PIOC,
- .clock = &pioC_clk,
- }
-};
-
-static void at572d940hf_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
-
-/* --------------------------------------------------------------------
- * AT572D940HF processor initialization
- * -------------------------------------------------------------------- */
-
-void __init at572d940hf_initialize(unsigned long main_clock)
-{
- /* Map peripherals */
- iotable_init(at572d940hf_io_desc, ARRAY_SIZE(at572d940hf_io_desc));
-
- at91_arch_reset = at572d940hf_reset;
- at91_extern_irq = (1 << AT572D940HF_ID_IRQ0) | (1 << AT572D940HF_ID_IRQ1)
- | (1 << AT572D940HF_ID_IRQ2);
-
- /* Init clock subsystem */
- at91_clock_init(main_clock);
-
- /* Register the processor-specific clocks */
- at572d940hf_register_clocks();
-
- /* Register GPIO subsystem */
- at91_gpio_init(at572d940hf_gpio, 3);
-}
-
-/* --------------------------------------------------------------------
- * Interrupt initialization
- * -------------------------------------------------------------------- */
-
-/*
- * The default interrupt priority levels (0 = lowest, 7 = highest).
- */
-static unsigned int at572d940hf_default_irq_priority[NR_AIC_IRQS] __initdata = {
- 7, /* Advanced Interrupt Controller */
- 7, /* System Peripherals */
- 0, /* Parallel IO Controller A */
- 0, /* Parallel IO Controller B */
- 0, /* Parallel IO Controller C */
- 3, /* Ethernet */
- 6, /* USART 0 */
- 6, /* USART 1 */
- 6, /* USART 2 */
- 0, /* Multimedia Card Interface */
- 4, /* USB Device Port */
- 0, /* Two-Wire Interface 0 */
- 6, /* Serial Peripheral Interface 0 */
- 6, /* Serial Peripheral Interface 1 */
- 5, /* Serial Synchronous Controller 0 */
- 5, /* Serial Synchronous Controller 1 */
- 5, /* Serial Synchronous Controller 2 */
- 0, /* Timer Counter 0 */
- 0, /* Timer Counter 1 */
- 0, /* Timer Counter 2 */
- 3, /* USB Host port */
- 3, /* Serial Synchronous Controller 3 */
- 0, /* Two-Wire Interface 1 */
- 0, /* CAN Controller 0 */
- 0, /* CAN Controller 1 */
- 0, /* mAgicV HALT line */
- 0, /* mAgicV SIRQ0 line */
- 0, /* mAgicV exception line */
- 0, /* mAgicV end of DMA line */
- 0, /* Advanced Interrupt Controller */
- 0, /* Advanced Interrupt Controller */
- 0, /* Advanced Interrupt Controller */
-};
-
-void __init at572d940hf_init_interrupts(unsigned int priority[NR_AIC_IRQS])
-{
- if (!priority)
- priority = at572d940hf_default_irq_priority;
-
- /* Initialize the AIC interrupt controller */
- at91_aic_init(priority);
-
- /* Enable GPIO interrupts */
- at91_gpio_irq_setup();
-}
-
diff --git a/arch/arm/mach-at91/at572d940hf_devices.c b/arch/arm/mach-at91/at572d940hf_devices.c
deleted file mode 100644
index 0fc20a240782..000000000000
--- a/arch/arm/mach-at91/at572d940hf_devices.c
+++ /dev/null
@@ -1,970 +0,0 @@
-/*
- * arch/arm/mach-at91/at572d940hf_devices.c
- *
- * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
- * Copyright (C) 2005 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-
-#include <mach/board.h>
-#include <mach/gpio.h>
-#include <mach/at572d940hf.h>
-#include <mach/at572d940hf_matrix.h>
-#include <mach/at91sam9_smc.h>
-
-#include "generic.h"
-#include "sam9_smc.h"
-
-
-/* --------------------------------------------------------------------
- * USB Host
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-static u64 ohci_dmamask = DMA_BIT_MASK(32);
-static struct at91_usbh_data usbh_data;
-
-static struct resource usbh_resources[] = {
- [0] = {
- .start = AT572D940HF_UHP_BASE,
- .end = AT572D940HF_UHP_BASE + SZ_1M - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_UHP,
- .end = AT572D940HF_ID_UHP,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_usbh_device = {
- .name = "at91_ohci",
- .id = -1,
- .dev = {
- .dma_mask = &ohci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &usbh_data,
- },
- .resource = usbh_resources,
- .num_resources = ARRAY_SIZE(usbh_resources),
-};
-
-void __init at91_add_device_usbh(struct at91_usbh_data *data)
-{
- if (!data)
- return;
-
- usbh_data = *data;
- platform_device_register(&at572d940hf_usbh_device);
-
-}
-#else
-void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * USB Device (Gadget)
- * -------------------------------------------------------------------- */
-
-#ifdef CONFIG_USB_GADGET_AT91
-static struct at91_udc_data udc_data;
-
-static struct resource udc_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_UDP,
- .end = AT572D940HF_BASE_UDP + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_UDP,
- .end = AT572D940HF_ID_UDP,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_udc_device = {
- .name = "at91_udc",
- .id = -1,
- .dev = {
- .platform_data = &udc_data,
- },
- .resource = udc_resources,
- .num_resources = ARRAY_SIZE(udc_resources),
-};
-
-void __init at91_add_device_udc(struct at91_udc_data *data)
-{
- if (!data)
- return;
-
- if (data->vbus_pin) {
- at91_set_gpio_input(data->vbus_pin, 0);
- at91_set_deglitch(data->vbus_pin, 1);
- }
-
- /* Pullup pin is handled internally */
-
- udc_data = *data;
- platform_device_register(&at572d940hf_udc_device);
-}
-#else
-void __init at91_add_device_udc(struct at91_udc_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * Ethernet
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
-static u64 eth_dmamask = DMA_BIT_MASK(32);
-static struct at91_eth_data eth_data;
-
-static struct resource eth_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_EMAC,
- .end = AT572D940HF_BASE_EMAC + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_EMAC,
- .end = AT572D940HF_ID_EMAC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_eth_device = {
- .name = "macb",
- .id = -1,
- .dev = {
- .dma_mask = &eth_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &eth_data,
- },
- .resource = eth_resources,
- .num_resources = ARRAY_SIZE(eth_resources),
-};
-
-void __init at91_add_device_eth(struct at91_eth_data *data)
-{
- if (!data)
- return;
-
- if (data->phy_irq_pin) {
- at91_set_gpio_input(data->phy_irq_pin, 0);
- at91_set_deglitch(data->phy_irq_pin, 1);
- }
-
- /* Only RMII is supported */
- data->is_rmii = 1;
-
- /* Pins used for RMII */
- at91_set_A_periph(AT91_PIN_PA16, 0); /* ETXCK_EREFCK */
- at91_set_A_periph(AT91_PIN_PA17, 0); /* ERXDV */
- at91_set_A_periph(AT91_PIN_PA18, 0); /* ERX0 */
- at91_set_A_periph(AT91_PIN_PA19, 0); /* ERX1 */
- at91_set_A_periph(AT91_PIN_PA20, 0); /* ERXER */
- at91_set_A_periph(AT91_PIN_PA23, 0); /* ETXEN */
- at91_set_A_periph(AT91_PIN_PA21, 0); /* ETX0 */
- at91_set_A_periph(AT91_PIN_PA22, 0); /* ETX1 */
- at91_set_A_periph(AT91_PIN_PA13, 0); /* EMDIO */
- at91_set_A_periph(AT91_PIN_PA14, 0); /* EMDC */
-
- eth_data = *data;
- platform_device_register(&at572d940hf_eth_device);
-}
-#else
-void __init at91_add_device_eth(struct at91_eth_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * MMC / SD
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
-static u64 mmc_dmamask = DMA_BIT_MASK(32);
-static struct at91_mmc_data mmc_data;
-
-static struct resource mmc_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_MCI,
- .end = AT572D940HF_BASE_MCI + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_MCI,
- .end = AT572D940HF_ID_MCI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_mmc_device = {
- .name = "at91_mci",
- .id = -1,
- .dev = {
- .dma_mask = &mmc_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &mmc_data,
- },
- .resource = mmc_resources,
- .num_resources = ARRAY_SIZE(mmc_resources),
-};
-
-void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
-{
- if (!data)
- return;
-
- /* input/irq */
- if (data->det_pin) {
- at91_set_gpio_input(data->det_pin, 1);
- at91_set_deglitch(data->det_pin, 1);
- }
- if (data->wp_pin)
- at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
- at91_set_gpio_output(data->vcc_pin, 0);
-
- /* CLK */
- at91_set_A_periph(AT91_PIN_PC22, 0);
-
- /* CMD */
- at91_set_A_periph(AT91_PIN_PC23, 1);
-
- /* DAT0, maybe DAT1..DAT3 */
- at91_set_A_periph(AT91_PIN_PC24, 1);
- if (data->wire4) {
- at91_set_A_periph(AT91_PIN_PC25, 1);
- at91_set_A_periph(AT91_PIN_PC26, 1);
- at91_set_A_periph(AT91_PIN_PC27, 1);
- }
-
- mmc_data = *data;
- platform_device_register(&at572d940hf_mmc_device);
-}
-#else
-void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * NAND / SmartMedia
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
-static struct atmel_nand_data nand_data;
-
-#define NAND_BASE AT91_CHIPSELECT_3
-
-static struct resource nand_resources[] = {
- {
- .start = NAND_BASE,
- .end = NAND_BASE + SZ_256M - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device at572d940hf_nand_device = {
- .name = "atmel_nand",
- .id = -1,
- .dev = {
- .platform_data = &nand_data,
- },
- .resource = nand_resources,
- .num_resources = ARRAY_SIZE(nand_resources),
-};
-
-void __init at91_add_device_nand(struct atmel_nand_data *data)
-{
- unsigned long csa;
-
- if (!data)
- return;
-
- csa = at91_sys_read(AT91_MATRIX_EBICSA);
- at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
-
- /* enable pin */
- if (data->enable_pin)
- at91_set_gpio_output(data->enable_pin, 1);
-
- /* ready/busy pin */
- if (data->rdy_pin)
- at91_set_gpio_input(data->rdy_pin, 1);
-
- /* card detect pin */
- if (data->det_pin)
- at91_set_gpio_input(data->det_pin, 1);
-
- at91_set_A_periph(AT91_PIN_PB28, 0); /* A[22] */
- at91_set_B_periph(AT91_PIN_PA28, 0); /* NANDOE */
- at91_set_B_periph(AT91_PIN_PA29, 0); /* NANDWE */
-
- nand_data = *data;
- platform_device_register(&at572d940hf_nand_device);
-}
-
-#else
-void __init at91_add_device_nand(struct atmel_nand_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * TWI (i2c)
- * -------------------------------------------------------------------- */
-
-/*
- * Prefer the GPIO code since the TWI controller isn't robust
- * (gets overruns and underruns under load) and can only issue
- * repeated STARTs in one scenario (the driver doesn't yet handle them).
- */
-
-#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
-
-static struct i2c_gpio_platform_data pdata = {
- .sda_pin = AT91_PIN_PC7,
- .sda_is_open_drain = 1,
- .scl_pin = AT91_PIN_PC8,
- .scl_is_open_drain = 1,
- .udelay = 2, /* ~100 kHz */
-};
-
-static struct platform_device at572d940hf_twi_device {
- .name = "i2c-gpio",
- .id = -1,
- .dev.platform_data = &pdata,
-};
-
-void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
-{
- at91_set_GPIO_periph(AT91_PIN_PC7, 1); /* TWD (SDA) */
- at91_set_multi_drive(AT91_PIN_PC7, 1);
-
- at91_set_GPIO_periph(AT91_PIN_PA8, 1); /* TWCK (SCL) */
- at91_set_multi_drive(AT91_PIN_PC8, 1);
-
- i2c_register_board_info(0, devices, nr_devices);
- platform_device_register(&at572d940hf_twi_device);
-}
-
-#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
-
-static struct resource twi0_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_TWI0,
- .end = AT572D940HF_BASE_TWI0 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_TWI0,
- .end = AT572D940HF_ID_TWI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_twi0_device = {
- .name = "at91_i2c",
- .id = 0,
- .resource = twi0_resources,
- .num_resources = ARRAY_SIZE(twi0_resources),
-};
-
-static struct resource twi1_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_TWI1,
- .end = AT572D940HF_BASE_TWI1 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_TWI1,
- .end = AT572D940HF_ID_TWI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_twi1_device = {
- .name = "at91_i2c",
- .id = 1,
- .resource = twi1_resources,
- .num_resources = ARRAY_SIZE(twi1_resources),
-};
-
-void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
-{
- /* pins used for TWI0 interface */
- at91_set_A_periph(AT91_PIN_PC7, 0); /* TWD */
- at91_set_multi_drive(AT91_PIN_PC7, 1);
-
- at91_set_A_periph(AT91_PIN_PC8, 0); /* TWCK */
- at91_set_multi_drive(AT91_PIN_PC8, 1);
-
- /* pins used for TWI1 interface */
- at91_set_A_periph(AT91_PIN_PC20, 0); /* TWD */
- at91_set_multi_drive(AT91_PIN_PC20, 1);
-
- at91_set_A_periph(AT91_PIN_PC21, 0); /* TWCK */
- at91_set_multi_drive(AT91_PIN_PC21, 1);
-
- i2c_register_board_info(0, devices, nr_devices);
- platform_device_register(&at572d940hf_twi0_device);
- platform_device_register(&at572d940hf_twi1_device);
-}
-#else
-void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * SPI
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-static struct resource spi0_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_SPI0,
- .end = AT572D940HF_BASE_SPI0 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_SPI0,
- .end = AT572D940HF_ID_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_spi0_device = {
- .name = "atmel_spi",
- .id = 0,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .resource = spi0_resources,
- .num_resources = ARRAY_SIZE(spi0_resources),
-};
-
-static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PA4, AT91_PIN_PA5, AT91_PIN_PA6 };
-
-static struct resource spi1_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_SPI1,
- .end = AT572D940HF_BASE_SPI1 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_SPI1,
- .end = AT572D940HF_ID_SPI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_spi1_device = {
- .name = "atmel_spi",
- .id = 1,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .resource = spi1_resources,
- .num_resources = ARRAY_SIZE(spi1_resources),
-};
-
-static const unsigned spi1_standard_cs[4] = { AT91_PIN_PC3, AT91_PIN_PC4, AT91_PIN_PC5, AT91_PIN_PC6 };
-
-void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
-{
- int i;
- unsigned long cs_pin;
- short enable_spi0 = 0;
- short enable_spi1 = 0;
-
- /* Choose SPI chip-selects */
- for (i = 0; i < nr_devices; i++) {
- if (devices[i].controller_data)
- cs_pin = (unsigned long) devices[i].controller_data;
- else if (devices[i].bus_num == 0)
- cs_pin = spi0_standard_cs[devices[i].chip_select];
- else
- cs_pin = spi1_standard_cs[devices[i].chip_select];
-
- if (devices[i].bus_num == 0)
- enable_spi0 = 1;
- else
- enable_spi1 = 1;
-
- /* enable chip-select pin */
- at91_set_gpio_output(cs_pin, 1);
-
- /* pass chip-select pin to driver */
- devices[i].controller_data = (void *) cs_pin;
- }
-
- spi_register_board_info(devices, nr_devices);
-
- /* Configure SPI bus(es) */
- if (enable_spi0) {
- at91_set_A_periph(AT91_PIN_PA0, 0); /* SPI0_MISO */
- at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
- at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
-
- at91_clock_associate("spi0_clk", &at572d940hf_spi0_device.dev, "spi_clk");
- platform_device_register(&at572d940hf_spi0_device);
- }
- if (enable_spi1) {
- at91_set_A_periph(AT91_PIN_PC0, 0); /* SPI1_MISO */
- at91_set_A_periph(AT91_PIN_PC1, 0); /* SPI1_MOSI */
- at91_set_A_periph(AT91_PIN_PC2, 0); /* SPI1_SPCK */
-
- at91_clock_associate("spi1_clk", &at572d940hf_spi1_device.dev, "spi_clk");
- platform_device_register(&at572d940hf_spi1_device);
- }
-}
-#else
-void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * Timer/Counter blocks
- * -------------------------------------------------------------------- */
-
-#ifdef CONFIG_ATMEL_TCLIB
-
-static struct resource tcb_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_TCB,
- .end = AT572D940HF_BASE_TCB + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_TC0,
- .end = AT572D940HF_ID_TC0,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = AT572D940HF_ID_TC1,
- .end = AT572D940HF_ID_TC1,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = AT572D940HF_ID_TC2,
- .end = AT572D940HF_ID_TC2,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_tcb_device = {
- .name = "atmel_tcb",
- .id = 0,
- .resource = tcb_resources,
- .num_resources = ARRAY_SIZE(tcb_resources),
-};
-
-static void __init at91_add_device_tc(void)
-{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at572d940hf_tcb_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at572d940hf_tcb_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at572d940hf_tcb_device.dev, "t2_clk");
- platform_device_register(&at572d940hf_tcb_device);
-}
-#else
-static void __init at91_add_device_tc(void) { }
-#endif
-
-
-/* --------------------------------------------------------------------
- * RTT
- * -------------------------------------------------------------------- */
-
-static struct resource rtt_resources[] = {
- {
- .start = AT91_BASE_SYS + AT91_RTT,
- .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device at572d940hf_rtt_device = {
- .name = "at91_rtt",
- .id = 0,
- .resource = rtt_resources,
- .num_resources = ARRAY_SIZE(rtt_resources),
-};
-
-static void __init at91_add_device_rtt(void)
-{
- platform_device_register(&at572d940hf_rtt_device);
-}
-
-
-/* --------------------------------------------------------------------
- * Watchdog
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
-static struct platform_device at572d940hf_wdt_device = {
- .name = "at91_wdt",
- .id = -1,
- .num_resources = 0,
-};
-
-static void __init at91_add_device_watchdog(void)
-{
- platform_device_register(&at572d940hf_wdt_device);
-}
-#else
-static void __init at91_add_device_watchdog(void) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * UART
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_SERIAL_ATMEL)
-static struct resource dbgu_resources[] = {
- [0] = {
- .start = AT91_VA_BASE_SYS + AT91_DBGU,
- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data dbgu_data = {
- .use_dma_tx = 0,
- .use_dma_rx = 0, /* DBGU not capable of receive DMA */
- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
-};
-
-static u64 dbgu_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_dbgu_device = {
- .name = "atmel_usart",
- .id = 0,
- .dev = {
- .dma_mask = &dbgu_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &dbgu_data,
- },
- .resource = dbgu_resources,
- .num_resources = ARRAY_SIZE(dbgu_resources),
-};
-
-static inline void configure_dbgu_pins(void)
-{
- at91_set_A_periph(AT91_PIN_PC31, 1); /* DTXD */
- at91_set_A_periph(AT91_PIN_PC30, 0); /* DRXD */
-}
-
-static struct resource uart0_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_US0,
- .end = AT572D940HF_BASE_US0 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_US0,
- .end = AT572D940HF_ID_US0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data uart0_data = {
- .use_dma_tx = 1,
- .use_dma_rx = 1,
-};
-
-static u64 uart0_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_uart0_device = {
- .name = "atmel_usart",
- .id = 1,
- .dev = {
- .dma_mask = &uart0_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &uart0_data,
- },
- .resource = uart0_resources,
- .num_resources = ARRAY_SIZE(uart0_resources),
-};
-
-static inline void configure_usart0_pins(unsigned pins)
-{
- at91_set_A_periph(AT91_PIN_PA8, 1); /* TXD0 */
- at91_set_A_periph(AT91_PIN_PA7, 0); /* RXD0 */
-
- if (pins & ATMEL_UART_RTS)
- at91_set_A_periph(AT91_PIN_PA10, 0); /* RTS0 */
- if (pins & ATMEL_UART_CTS)
- at91_set_A_periph(AT91_PIN_PA9, 0); /* CTS0 */
-}
-
-static struct resource uart1_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_US1,
- .end = AT572D940HF_BASE_US1 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_US1,
- .end = AT572D940HF_ID_US1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data uart1_data = {
- .use_dma_tx = 1,
- .use_dma_rx = 1,
-};
-
-static u64 uart1_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_uart1_device = {
- .name = "atmel_usart",
- .id = 2,
- .dev = {
- .dma_mask = &uart1_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &uart1_data,
- },
- .resource = uart1_resources,
- .num_resources = ARRAY_SIZE(uart1_resources),
-};
-
-static inline void configure_usart1_pins(unsigned pins)
-{
- at91_set_A_periph(AT91_PIN_PC10, 1); /* TXD1 */
- at91_set_A_periph(AT91_PIN_PC9 , 0); /* RXD1 */
-
- if (pins & ATMEL_UART_RTS)
- at91_set_A_periph(AT91_PIN_PC12, 0); /* RTS1 */
- if (pins & ATMEL_UART_CTS)
- at91_set_A_periph(AT91_PIN_PC11, 0); /* CTS1 */
-}
-
-static struct resource uart2_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_US2,
- .end = AT572D940HF_BASE_US2 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_US2,
- .end = AT572D940HF_ID_US2,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data uart2_data = {
- .use_dma_tx = 1,
- .use_dma_rx = 1,
-};
-
-static u64 uart2_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_uart2_device = {
- .name = "atmel_usart",
- .id = 3,
- .dev = {
- .dma_mask = &uart2_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &uart2_data,
- },
- .resource = uart2_resources,
- .num_resources = ARRAY_SIZE(uart2_resources),
-};
-
-static inline void configure_usart2_pins(unsigned pins)
-{
- at91_set_A_periph(AT91_PIN_PC15, 1); /* TXD2 */
- at91_set_A_periph(AT91_PIN_PC14, 0); /* RXD2 */
-
- if (pins & ATMEL_UART_RTS)
- at91_set_A_periph(AT91_PIN_PC17, 0); /* RTS2 */
- if (pins & ATMEL_UART_CTS)
- at91_set_A_periph(AT91_PIN_PC16, 0); /* CTS2 */
-}
-
-static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */
-struct platform_device *atmel_default_console_device; /* the serial console device */
-
-void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
-{
- struct platform_device *pdev;
-
- switch (id) {
- case 0: /* DBGU */
- pdev = &at572d940hf_dbgu_device;
- configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
- break;
- case AT572D940HF_ID_US0:
- pdev = &at572d940hf_uart0_device;
- configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
- break;
- case AT572D940HF_ID_US1:
- pdev = &at572d940hf_uart1_device;
- configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
- break;
- case AT572D940HF_ID_US2:
- pdev = &at572d940hf_uart2_device;
- configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
- break;
- default:
- return;
- }
- pdev->id = portnr; /* update to mapped ID */
-
- if (portnr < ATMEL_MAX_UART)
- at91_uarts[portnr] = pdev;
-}
-
-void __init at91_set_serial_console(unsigned portnr)
-{
- if (portnr < ATMEL_MAX_UART)
- atmel_default_console_device = at91_uarts[portnr];
-}
-
-void __init at91_add_device_serial(void)
-{
- int i;
-
- for (i = 0; i < ATMEL_MAX_UART; i++) {
- if (at91_uarts[i])
- platform_device_register(at91_uarts[i]);
- }
-
- if (!atmel_default_console_device)
- printk(KERN_INFO "AT91: No default serial console defined.\n");
-}
-
-#else
-void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
-void __init at91_set_serial_console(unsigned portnr) {}
-void __init at91_add_device_serial(void) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * mAgic
- * -------------------------------------------------------------------- */
-
-#ifdef CONFIG_MAGICV
-static struct resource mAgic_resources[] = {
- {
- .start = AT91_MAGIC_PM_BASE,
- .end = AT91_MAGIC_PM_BASE + AT91_MAGIC_PM_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_DM_I_BASE,
- .end = AT91_MAGIC_DM_I_BASE + AT91_MAGIC_DM_I_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_DM_F_BASE,
- .end = AT91_MAGIC_DM_F_BASE + AT91_MAGIC_DM_F_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_DM_DB_BASE,
- .end = AT91_MAGIC_DM_DB_BASE + AT91_MAGIC_DM_DB_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_REGS_BASE,
- .end = AT91_MAGIC_REGS_BASE + AT91_MAGIC_REGS_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_EXTPAGE_BASE,
- .end = AT91_MAGIC_EXTPAGE_BASE + AT91_MAGIC_EXTPAGE_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT572D940HF_ID_MSIRQ0,
- .end = AT572D940HF_ID_MSIRQ0,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = AT572D940HF_ID_MHALT,
- .end = AT572D940HF_ID_MHALT,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = AT572D940HF_ID_MEXC,
- .end = AT572D940HF_ID_MEXC,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = AT572D940HF_ID_MEDMA,
- .end = AT572D940HF_ID_MEDMA,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device mAgic_device = {
- .name = "mAgic",
- .id = -1,
- .num_resources = ARRAY_SIZE(mAgic_resources),
- .resource = mAgic_resources,
-};
-
-void __init at91_add_device_mAgic(void)
-{
- platform_device_register(&mAgic_device);
-}
-#else
-void __init at91_add_device_mAgic(void) {}
-#endif
-
-
-/* -------------------------------------------------------------------- */
-
-/*
- * These devices are always present and don't need any board-specific
- * setup.
- */
-static int __init at91_add_standard_devices(void)
-{
- at91_add_device_rtt();
- at91_add_device_watchdog();
- at91_add_device_tc();
- return 0;
-}
-
-arch_initcall(at91_add_standard_devices);
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index 73376170fb91..17fae4a42ab5 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -222,6 +222,25 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq1
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -258,12 +277,29 @@ static void __init at91cap9_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
clk_register(&pck3);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91cap9_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -303,11 +339,14 @@ static void at91cap9_poweroff(void)
* AT91CAP9 processor initialization
* -------------------------------------------------------------------- */
-void __init at91cap9_initialize(unsigned long main_clock)
+void __init at91cap9_map_io(void)
{
/* Map peripherals */
iotable_init(at91cap9_io_desc, ARRAY_SIZE(at91cap9_io_desc));
+}
+void __init at91cap9_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91cap9_reset;
pm_power_off = at91cap9_poweroff;
at91_extern_irq = (1 << AT91CAP9_ID_IRQ0) | (1 << AT91CAP9_ID_IRQ1);
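
The clkdev tables added above replace the board/SoC-side at91_clock_associate() calls removed in the device files that follow: a driver now looks its clock up by connection id and device name through the standard clk API. A minimal sketch, assuming a hypothetical example_mci_probe() bound to the at91_mci.0 device; clk_get()/clk_enable() are the standard interfaces.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_mci_probe(struct platform_device *pdev)
{
	/*
	 * For the "at91_mci" platform device with id 0, clkdev matches this
	 * request against CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk)
	 * from the lookup table above.
	 */
	struct clk *clk = clk_get(&pdev->dev, "mci_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	return 0;
}
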
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index 21020ceb2f3a..cd850ed6f335 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -181,10 +181,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
/* Pullup pin is handled internally by USB device peripheral */
- /* Clocks */
- at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
- at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
-
platform_device_register(&at91_usba_udc_device);
}
#else
@@ -355,7 +351,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc0_data = *data;
- at91_clock_associate("mci0_clk", &at91cap9_mmc0_device.dev, "mci_clk");
platform_device_register(&at91cap9_mmc0_device);
} else { /* MCI1 */
/* CLK */
@@ -373,7 +368,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc1_data = *data;
- at91_clock_associate("mci1_clk", &at91cap9_mmc1_device.dev, "mci_clk");
platform_device_register(&at91cap9_mmc1_device);
}
}
@@ -614,7 +608,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_B_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_B_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91cap9_spi0_device.dev, "spi_clk");
platform_device_register(&at91cap9_spi0_device);
}
if (enable_spi1) {
@@ -622,7 +615,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB13, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91cap9_spi1_device.dev, "spi_clk");
platform_device_register(&at91cap9_spi1_device);
}
}
@@ -659,8 +651,6 @@ static struct platform_device at91cap9_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has one clock and irq for all three TC channels */
- at91_clock_associate("tcb_clk", &at91cap9_tcb_device.dev, "t0_clk");
platform_device_register(&at91cap9_tcb_device);
}
#else
@@ -1001,12 +991,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91CAP9_ID_SSC0:
pdev = &at91cap9_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "ssc");
break;
case AT91CAP9_ID_SSC1:
pdev = &at91cap9_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "ssc");
break;
default:
return;
@@ -1199,32 +1187,30 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91cap9_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91CAP9_ID_US0:
pdev = &at91cap9_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91CAP9_ID_US1:
pdev = &at91cap9_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91CAP9_ID_US2:
pdev = &at91cap9_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1232,8 +1218,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91cap9_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
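Note that the UART's platform_device id is no longer overwritten with the tty number; the device keeps its fixed "atmel_usart.N" name (which the clkdev tables key on) and the mapped port now travels in the atmel_uart_data platform data. The board-side sequence is unchanged; a sketch based on the board files later in this patch:

	/* Sketch: board early-init serial setup (port assignments are examples). */
	at91_register_uart(0, 0, 0);			/* DBGU on ttyS0 (Rx & Tx only) */
	at91_register_uart(AT91CAP9_ID_US0, 1, 0);	/* USART0 on ttyS1 */
	at91_set_serial_console(0);			/* console on DBGU; now also registers its clock */

with at91_add_device_serial() still called from the board's init_machine hook.
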
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 2e9ecad97f3d..b228ce9e21a1 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -18,6 +18,7 @@
#include <mach/at91rm9200.h>
#include <mach/at91_pmc.h>
#include <mach/at91_st.h>
+#include <mach/cpu.h>
#include "generic.h"
#include "clock.h"
@@ -191,6 +192,26 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq6
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -227,12 +248,29 @@ static void __init at91rm9200_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
clk_register(&pck3);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91rm9200_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -266,15 +304,25 @@ static void at91rm9200_reset(void)
at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
}
+int rm9200_type;
+EXPORT_SYMBOL(rm9200_type);
+
+void __init at91rm9200_set_type(int type)
+{
+ rm9200_type = type;
+}
/* --------------------------------------------------------------------
* AT91RM9200 processor initialization
* -------------------------------------------------------------------- */
-void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks)
+void __init at91rm9200_map_io(void)
{
/* Map peripherals */
iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
+}
+void __init at91rm9200_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91rm9200_reset;
at91_extern_irq = (1 << AT91RM9200_ID_IRQ0) | (1 << AT91RM9200_ID_IRQ1)
| (1 << AT91RM9200_ID_IRQ2) | (1 << AT91RM9200_ID_IRQ3)
@@ -288,7 +336,8 @@ void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks
at91rm9200_register_clocks();
/* Initialize GPIO subsystem */
- at91_gpio_init(at91rm9200_gpio, banks);
+ at91_gpio_init(at91rm9200_gpio,
+ cpu_is_at91rm9200_bga() ? AT91RM9200_BGA : AT91RM9200_PQFP);
}
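The package variant is no longer an argument to at91rm9200_initialize(): PQFP boards record it with at91rm9200_set_type() from their init_early hook, BGA boards simply drop the second argument (see carmeva/csb337/csb637 below), and the GPIO bank count is derived via cpu_is_at91rm9200_bga(). A sketch of the new PQFP board sequence, mirroring board-1arm.c and board-cpuat91.c:

	/* Sketch: RM9200 PQFP board early init after this change. */
	static void __init board_init_early(void)
	{
		at91rm9200_set_type(ARCH_REVISON_9200_PQFP);	/* BGA boards omit this */
		at91rm9200_initialize(18432000);		/* 18.432 MHz crystal */
		at91_register_uart(0, 0, 0);			/* DBGU on ttyS0 */
		at91_set_serial_console(0);
	}
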
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 7b539228e0ef..a0ba475be04c 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -644,15 +644,7 @@ static struct platform_device at91rm9200_tcb1_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91rm9200_tcb0_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91rm9200_tcb0_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91rm9200_tcb0_device.dev, "t2_clk");
platform_device_register(&at91rm9200_tcb0_device);
-
- at91_clock_associate("tc3_clk", &at91rm9200_tcb1_device.dev, "t0_clk");
- at91_clock_associate("tc4_clk", &at91rm9200_tcb1_device.dev, "t1_clk");
- at91_clock_associate("tc5_clk", &at91rm9200_tcb1_device.dev, "t2_clk");
platform_device_register(&at91rm9200_tcb1_device);
}
#else
@@ -849,17 +841,14 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91RM9200_ID_SSC0:
pdev = &at91rm9200_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "ssc");
break;
case AT91RM9200_ID_SSC1:
pdev = &at91rm9200_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "ssc");
break;
case AT91RM9200_ID_SSC2:
pdev = &at91rm9200_ssc2_device;
configure_ssc2_pins(pins);
- at91_clock_associate("ssc2_clk", &pdev->dev, "ssc");
break;
default:
return;
@@ -1109,37 +1098,34 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91rm9200_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US0:
pdev = &at91rm9200_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US1:
pdev = &at91rm9200_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US2:
pdev = &at91rm9200_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US3:
pdev = &at91rm9200_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1147,8 +1133,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91rm9200_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 195208b30024..7d606b04d313 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -231,6 +231,28 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq2
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
+ CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.5", &usart4_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.6", &usart5_clk),
+};
+
/*
* The two programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -255,10 +277,27 @@ static void __init at91sam9260_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9260_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -289,7 +328,7 @@ static void at91sam9260_poweroff(void)
* AT91SAM9260 processor initialization
* -------------------------------------------------------------------- */
-static void __init at91sam9xe_initialize(void)
+static void __init at91sam9xe_map_io(void)
{
unsigned long cidr, sram_size;
@@ -310,18 +349,21 @@ static void __init at91sam9xe_initialize(void)
iotable_init(at91sam9xe_sram_desc, ARRAY_SIZE(at91sam9xe_sram_desc));
}
-void __init at91sam9260_initialize(unsigned long main_clock)
+void __init at91sam9260_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9260_io_desc, ARRAY_SIZE(at91sam9260_io_desc));
if (cpu_is_at91sam9xe())
- at91sam9xe_initialize();
+ at91sam9xe_map_io();
else if (cpu_is_at91sam9g20())
iotable_init(at91sam9g20_sram_desc, ARRAY_SIZE(at91sam9g20_sram_desc));
else
iotable_init(at91sam9260_sram_desc, ARRAY_SIZE(at91sam9260_sram_desc));
+}
+void __init at91sam9260_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9260_poweroff;
at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
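With these tables registered, drivers obtain the clocks through the generic clkdev clk_get(): an entry is selected by the consuming device's name plus the connection id, which is exactly the pairing at91_clock_associate() used to set up by hand. A sketch using identifiers from this patch:

	/* Sketch: first TC channel clock of the atmel_tcb.0 device. */
	struct clk *t0 = clk_get(&at91sam9260_tcb0_device.dev, "t0_clk");
	/* matches CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk) */
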
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 07eb7b07e442..1fdeb9058a76 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -609,7 +609,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI1_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9260_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9260_spi0_device);
}
if (enable_spi1) {
@@ -617,7 +616,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9260_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9260_spi1_device);
}
}
@@ -694,15 +692,7 @@ static struct platform_device at91sam9260_tcb1_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91sam9260_tcb0_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91sam9260_tcb0_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91sam9260_tcb0_device.dev, "t2_clk");
platform_device_register(&at91sam9260_tcb0_device);
-
- at91_clock_associate("tc3_clk", &at91sam9260_tcb1_device.dev, "t0_clk");
- at91_clock_associate("tc4_clk", &at91sam9260_tcb1_device.dev, "t1_clk");
- at91_clock_associate("tc5_clk", &at91sam9260_tcb1_device.dev, "t2_clk");
platform_device_register(&at91sam9260_tcb1_device);
}
#else
@@ -820,7 +810,6 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9260_ID_SSC:
pdev = &at91sam9260_ssc_device;
configure_ssc_pins(pins);
- at91_clock_associate("ssc_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1139,47 +1128,42 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9260_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US0:
pdev = &at91sam9260_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US1:
pdev = &at91sam9260_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US2:
pdev = &at91sam9260_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US3:
pdev = &at91sam9260_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US4:
pdev = &at91sam9260_uart4_device;
configure_usart4_pins();
- at91_clock_associate("usart4_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US5:
pdev = &at91sam9260_uart5_device;
configure_usart5_pins();
- at91_clock_associate("usart5_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1187,8 +1171,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9260_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index fcad88668504..c1483168c97a 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -178,6 +178,24 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq2
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -228,6 +246,11 @@ static void __init at91sam9261_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
@@ -237,6 +260,18 @@ static void __init at91sam9261_register_clocks(void)
clk_register(&hck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9261_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -267,7 +302,7 @@ static void at91sam9261_poweroff(void)
* AT91SAM9261 processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9261_initialize(unsigned long main_clock)
+void __init at91sam9261_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9261_io_desc, ARRAY_SIZE(at91sam9261_io_desc));
@@ -276,8 +311,10 @@ void __init at91sam9261_initialize(unsigned long main_clock)
iotable_init(at91sam9g10_sram_desc, ARRAY_SIZE(at91sam9g10_sram_desc));
else
iotable_init(at91sam9261_sram_desc, ARRAY_SIZE(at91sam9261_sram_desc));
+}
-
+void __init at91sam9261_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9261_poweroff;
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 59fc48311fb0..3eb4538fceeb 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -426,7 +426,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9261_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9261_spi0_device);
}
if (enable_spi1) {
@@ -434,7 +433,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB31, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB29, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9261_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9261_spi1_device);
}
}
@@ -581,10 +579,6 @@ static struct platform_device at91sam9261_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91sam9261_tcb_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91sam9261_tcb_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91sam9261_tcb_device.dev, "t2_clk");
platform_device_register(&at91sam9261_tcb_device);
}
#else
@@ -786,17 +780,14 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9261_ID_SSC0:
pdev = &at91sam9261_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9261_ID_SSC1:
pdev = &at91sam9261_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
case AT91SAM9261_ID_SSC2:
pdev = &at91sam9261_ssc2_device;
configure_ssc2_pins(pins);
- at91_clock_associate("ssc2_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -989,32 +980,30 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9261_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9261_ID_US0:
pdev = &at91sam9261_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9261_ID_US1:
pdev = &at91sam9261_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9261_ID_US2:
pdev = &at91sam9261_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1022,8 +1011,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9261_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 249f900954d8..dc28477d14ff 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -199,6 +199,23 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq1
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -235,12 +252,29 @@ static void __init at91sam9263_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
clk_register(&pck3);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9263_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -279,11 +313,14 @@ static void at91sam9263_poweroff(void)
* AT91SAM9263 processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9263_initialize(unsigned long main_clock)
+void __init at91sam9263_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9263_io_desc, ARRAY_SIZE(at91sam9263_io_desc));
+}
+void __init at91sam9263_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9263_poweroff;
at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index fb5c23af1017..ffe081b77ed0 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -308,7 +308,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc0_data = *data;
- at91_clock_associate("mci0_clk", &at91sam9263_mmc0_device.dev, "mci_clk");
platform_device_register(&at91sam9263_mmc0_device);
} else { /* MCI1 */
/* CLK */
@@ -339,7 +338,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc1_data = *data;
- at91_clock_associate("mci1_clk", &at91sam9263_mmc1_device.dev, "mci_clk");
platform_device_register(&at91sam9263_mmc1_device);
}
}
@@ -686,7 +684,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_B_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_B_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9263_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9263_spi0_device);
}
if (enable_spi1) {
@@ -694,7 +691,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB13, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9263_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9263_spi1_device);
}
}
@@ -941,8 +937,6 @@ static struct platform_device at91sam9263_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has one clock and irq for all three TC channels */
- at91_clock_associate("tcb_clk", &at91sam9263_tcb_device.dev, "t0_clk");
platform_device_register(&at91sam9263_tcb_device);
}
#else
@@ -1171,12 +1165,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9263_ID_SSC0:
pdev = &at91sam9263_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9263_ID_SSC1:
pdev = &at91sam9263_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1370,32 +1362,30 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9263_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9263_ID_US0:
pdev = &at91sam9263_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9263_ID_US1:
pdev = &at91sam9263_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9263_ID_US2:
pdev = &at91sam9263_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1403,8 +1393,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9263_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index c67b47f1c0fd..2bb6ff9af1c7 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -184,22 +184,6 @@ static struct clk vdec_clk = {
.type = CLK_TYPE_PERIPHERAL,
};
-/* One additional fake clock for ohci */
-static struct clk ohci_clk = {
- .name = "ohci_clk",
- .pmc_mask = 0,
- .type = CLK_TYPE_PERIPHERAL,
- .parent = &uhphs_clk,
-};
-
-/* One additional fake clock for second TC block */
-static struct clk tcb1_clk = {
- .name = "tcb1_clk",
- .pmc_mask = 0,
- .type = CLK_TYPE_PERIPHERAL,
- .parent = &tcb0_clk,
-};
-
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
@@ -228,8 +212,30 @@ static struct clk *periph_clocks[] __initdata = {
&udphs_clk,
&mmc1_clk,
// irq0
- &ohci_clk,
- &tcb1_clk,
+};
+
+static struct clk_lookup periph_clocks_lookups[] = {
+ /* One additional fake clock for ohci */
+ CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
+ CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk),
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tcb0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
};
/*
@@ -256,6 +262,11 @@ static void __init at91sam9g45_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
if (cpu_is_at91sam9m10() || cpu_is_at91sam9m11())
clk_register(&vdec_clk);
@@ -263,6 +274,18 @@ static void __init at91sam9g45_register_clocks(void)
clk_register(&pck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9g45_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -306,11 +329,14 @@ static void at91sam9g45_poweroff(void)
* AT91SAM9G45 processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9g45_initialize(unsigned long main_clock)
+void __init at91sam9g45_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9g45_io_desc, ARRAY_SIZE(at91sam9g45_io_desc));
+}
+void __init at91sam9g45_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9g45_reset;
pm_power_off = at91sam9g45_poweroff;
at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0);
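The fake ohci_clk and tcb1_clk structs are dropped in favour of lookup entries that alias the real clocks: the CLKDEV_CON_ID() entry has no dev_id and so matches any device asking for "ohci_clk", and atmel_tcb.1 reuses tcb0_clk because this chip has one clock for all six TC channels. A sketch of the resulting lookups (ohci_pdev is a hypothetical pointer to the OHCI platform device):

	/* Sketch: both requests now resolve to existing clocks instead of fakes. */
	struct clk *ohci = clk_get(&ohci_pdev->dev, "ohci_clk");		/* -> uhphs_clk */
	struct clk *tcb1 = clk_get(&at91sam9g45_tcb1_device.dev, "t0_clk");	/* -> tcb0_clk */
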
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 5e9f8a4c38df..05674865bc21 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -180,7 +180,6 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data)
}
usbh_ehci_data = *data;
- at91_clock_associate("uhphs_clk", &at91_usbh_ehci_device.dev, "ehci_clk");
platform_device_register(&at91_usbh_ehci_device);
}
#else
@@ -266,10 +265,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
/* Pullup pin is handled internally by USB device peripheral */
- /* Clocks */
- at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
- at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
-
platform_device_register(&at91_usba_udc_device);
}
#else
@@ -478,7 +473,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
}
mmc0_data = *data;
- at91_clock_associate("mci0_clk", &at91sam9g45_mmc0_device.dev, "mci_clk");
platform_device_register(&at91sam9g45_mmc0_device);
} else { /* MCI1 */
@@ -504,7 +498,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
}
mmc1_data = *data;
- at91_clock_associate("mci1_clk", &at91sam9g45_mmc1_device.dev, "mci_clk");
platform_device_register(&at91sam9g45_mmc1_device);
}
@@ -801,7 +794,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI0_MOSI */
at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9g45_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9g45_spi0_device);
}
if (enable_spi1) {
@@ -809,7 +801,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB15, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB16, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9g45_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9g45_spi1_device);
}
}
@@ -999,10 +990,7 @@ static struct platform_device at91sam9g45_tcb1_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has one clock and irq for all six TC channels */
- at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb0_device);
- at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb1_device);
}
#else
@@ -1286,12 +1274,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9G45_ID_SSC0:
pdev = &at91sam9g45_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9G45_ID_SSC1:
pdev = &at91sam9g45_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1527,37 +1513,34 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9g45_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US0:
pdev = &at91sam9g45_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US1:
pdev = &at91sam9g45_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US2:
pdev = &at91sam9g45_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US3:
pdev = &at91sam9g45_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1565,8 +1548,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9g45_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 6a9d24e5ed8e..1a40f16b66c8 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -190,6 +190,24 @@ static struct clk *periph_clocks[] __initdata = {
// irq0
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
+};
+
/*
* The two programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -214,10 +232,27 @@ static void __init at91sam9rl_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9rl_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -252,7 +287,7 @@ static void at91sam9rl_poweroff(void)
* AT91SAM9RL processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9rl_initialize(unsigned long main_clock)
+void __init at91sam9rl_map_io(void)
{
unsigned long cidr, sram_size;
@@ -275,7 +310,10 @@ void __init at91sam9rl_initialize(unsigned long main_clock)
/* Map SRAM */
iotable_init(at91sam9rl_sram_desc, ARRAY_SIZE(at91sam9rl_sram_desc));
+}
+void __init at91sam9rl_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9rl_poweroff;
at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index c49262bddd85..c296045f2b6a 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -155,10 +155,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
/* Pullup pin is handled internally by USB device peripheral */
- /* Clocks */
- at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
- at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
-
platform_device_register(&at91_usba_udc_device);
}
#else
@@ -605,10 +601,6 @@ static struct platform_device at91sam9rl_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91sam9rl_tcb_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91sam9rl_tcb_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91sam9rl_tcb_device.dev, "t2_clk");
platform_device_register(&at91sam9rl_tcb_device);
}
#else
@@ -892,12 +884,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9RL_ID_SSC0:
pdev = &at91sam9rl_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9RL_ID_SSC1:
pdev = &at91sam9rl_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1141,37 +1131,34 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9rl_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US0:
pdev = &at91sam9rl_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US1:
pdev = &at91sam9rl_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US2:
pdev = &at91sam9rl_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US3:
pdev = &at91sam9rl_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1179,8 +1166,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9rl_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c
index ad3ec85b2790..56ba3bd035ae 100644
--- a/arch/arm/mach-at91/at91x40.c
+++ b/arch/arm/mach-at91/at91x40.c
@@ -37,11 +37,6 @@ unsigned long clk_get_rate(struct clk *clk)
return AT91X40_MASTER_CLOCK;
}
-struct clk *clk_get(struct device *dev, const char *id)
-{
- return NULL;
-}
-
void __init at91x40_initialize(unsigned long main_clock)
{
at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1)
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 8a3fc84847c1..ab1d463aa47d 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -35,14 +35,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init onearm_map_io(void)
+static void __init onearm_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -92,9 +96,9 @@ static void __init onearm_board_init(void)
MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = onearm_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = onearm_init_early,
.init_irq = onearm_init_irq,
.init_machine = onearm_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index cba7f7771fee..a4924de48c36 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -48,7 +48,7 @@
#include "generic.h"
-static void __init afeb9260_map_io(void)
+static void __init afeb9260_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -218,9 +218,9 @@ static void __init afeb9260_board_init(void)
MACHINE_START(AFEB9260, "Custom afeb9260 board")
/* Maintainer: Sergey Lapin <slapin@ossfans.org> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = afeb9260_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = afeb9260_init_early,
.init_irq = afeb9260_init_irq,
.init_machine = afeb9260_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-at572d940hf_ek.c b/arch/arm/mach-at91/board-at572d940hf_ek.c
deleted file mode 100644
index 3929f1c9e4e5..000000000000
--- a/arch/arm/mach-at91/board-at572d940hf_ek.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * linux/arch/arm/mach-at91/board-at572d940hf_ek.c
- *
- * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2005 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/ds1305.h>
-#include <linux/irq.h>
-#include <linux/mtd/physmap.h>
-
-#include <mach/hardware.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/irq.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-
-#include <mach/board.h>
-#include <mach/gpio.h>
-#include <mach/at91sam9_smc.h>
-
-#include "sam9_smc.h"
-#include "generic.h"
-
-
-static void __init eb_map_io(void)
-{
- /* Initialize processor: 12.500 MHz crystal */
- at572d940hf_initialize(12000000);
-
- /* DBGU on ttyS0. (Rx & Tx only) */
- at91_register_uart(0, 0, 0);
-
- /* USART0 on ttyS1. (Rx & Tx only) */
- at91_register_uart(AT572D940HF_ID_US0, 1, 0);
-
- /* USART1 on ttyS2. (Rx & Tx only) */
- at91_register_uart(AT572D940HF_ID_US1, 2, 0);
-
- /* USART2 on ttyS3. (Tx & Rx only */
- at91_register_uart(AT572D940HF_ID_US2, 3, 0);
-
- /* set serial console to ttyS0 (ie, DBGU) */
- at91_set_serial_console(0);
-}
-
-static void __init eb_init_irq(void)
-{
- at572d940hf_init_interrupts(NULL);
-}
-
-
-/*
- * USB Host Port
- */
-static struct at91_usbh_data __initdata eb_usbh_data = {
- .ports = 2,
-};
-
-
-/*
- * USB Device Port
- */
-static struct at91_udc_data __initdata eb_udc_data = {
- .vbus_pin = 0, /* no VBUS detection,UDC always on */
- .pullup_pin = 0, /* pull-up driven by UDC */
-};
-
-
-/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata eb_mmc_data = {
- .wire4 = 1,
-/* .det_pin = ... not connected */
-/* .wp_pin = ... not connected */
-/* .vcc_pin = ... not connected */
-};
-
-
-/*
- * MACB Ethernet device
- */
-static struct at91_eth_data __initdata eb_eth_data = {
- .phy_irq_pin = AT91_PIN_PB25,
- .is_rmii = 1,
-};
-
-/*
- * NOR flash
- */
-
-static struct mtd_partition eb_nor_partitions[] = {
- {
- .name = "Raw Environment",
- .offset = 0,
- .size = SZ_4M,
- .mask_flags = 0,
- },
- {
- .name = "OS FS",
- .offset = MTDPART_OFS_APPEND,
- .size = 3 * SZ_1M,
- .mask_flags = 0,
- },
- {
- .name = "APP FS",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- .mask_flags = 0,
- },
-};
-
-static void nor_flash_set_vpp(struct map_info* mi, int i) {
-};
-
-static struct physmap_flash_data nor_flash_data = {
- .width = 4,
- .parts = eb_nor_partitions,
- .nr_parts = ARRAY_SIZE(eb_nor_partitions),
- .set_vpp = nor_flash_set_vpp,
-};
-
-static struct resource nor_flash_resources[] = {
- {
- .start = AT91_CHIPSELECT_0,
- .end = AT91_CHIPSELECT_0 + SZ_16M - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device nor_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &nor_flash_data,
- },
- .resource = nor_flash_resources,
- .num_resources = ARRAY_SIZE(nor_flash_resources),
-};
-
-static struct sam9_smc_config __initdata eb_nor_smc_config = {
- .ncs_read_setup = 1,
- .nrd_setup = 1,
- .ncs_write_setup = 1,
- .nwe_setup = 1,
-
- .ncs_read_pulse = 7,
- .nrd_pulse = 7,
- .ncs_write_pulse = 7,
- .nwe_pulse = 7,
-
- .read_cycle = 9,
- .write_cycle = 9,
-
- .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_WRITE | AT91_SMC_DBW_32,
- .tdf_cycles = 1,
-};
-
-static void __init eb_add_device_nor(void)
-{
- /* configure chip-select 0 (NOR) */
- sam9_smc_configure(0, &eb_nor_smc_config);
- platform_device_register(&nor_flash);
-}
-
-/*
- * NAND flash
- */
-static struct mtd_partition __initdata eb_nand_partition[] = {
- {
- .name = "Partition 1",
- .offset = 0,
- .size = SZ_16M,
- },
- {
- .name = "Partition 2",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
- }
-};
-
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
- *num_partitions = ARRAY_SIZE(eb_nand_partition);
- return eb_nand_partition;
-}
-
-static struct atmel_nand_data __initdata eb_nand_data = {
- .ale = 22,
- .cle = 21,
-/* .det_pin = ... not connected */
-/* .rdy_pin = AT91_PIN_PC16, */
- .enable_pin = AT91_PIN_PA15,
- .partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
-};
-
-static struct sam9_smc_config __initdata eb_nand_smc_config = {
- .ncs_read_setup = 0,
- .nrd_setup = 0,
- .ncs_write_setup = 1,
- .nwe_setup = 1,
-
- .ncs_read_pulse = 3,
- .nrd_pulse = 3,
- .ncs_write_pulse = 3,
- .nwe_pulse = 3,
-
- .read_cycle = 5,
- .write_cycle = 5,
-
- .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
- .tdf_cycles = 12,
-};
-
-static void __init eb_add_device_nand(void)
-{
- /* setup bus-width (8 or 16) */
- if (eb_nand_data.bus_width_16)
- eb_nand_smc_config.mode |= AT91_SMC_DBW_16;
- else
- eb_nand_smc_config.mode |= AT91_SMC_DBW_8;
-
- /* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &eb_nand_smc_config);
-
- at91_add_device_nand(&eb_nand_data);
-}
-
-
-/*
- * SPI devices
- */
-static struct resource rtc_resources[] = {
- [0] = {
- .start = AT572D940HF_ID_IRQ1,
- .end = AT572D940HF_ID_IRQ1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct ds1305_platform_data ds1306_data = {
- .is_ds1306 = true,
- .en_1hz = false,
-};
-
-static struct spi_board_info eb_spi_devices[] = {
- { /* RTC Dallas DS1306 */
- .modalias = "rtc-ds1305",
- .chip_select = 3,
- .mode = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA,
- .max_speed_hz = 500000,
- .bus_num = 0,
- .irq = AT572D940HF_ID_IRQ1,
- .platform_data = (void *) &ds1306_data,
- },
-#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
- { /* Dataflash card */
- .modalias = "mtd_dataflash",
- .chip_select = 0,
- .max_speed_hz = 15 * 1000 * 1000,
- .bus_num = 0,
- },
-#endif
-};
-
-static void __init eb_board_init(void)
-{
- /* Serial */
- at91_add_device_serial();
- /* USB Host */
- at91_add_device_usbh(&eb_usbh_data);
- /* USB Device */
- at91_add_device_udc(&eb_udc_data);
- /* I2C */
- at91_add_device_i2c(NULL, 0);
- /* NOR */
- eb_add_device_nor();
- /* NAND */
- eb_add_device_nand();
- /* SPI */
- at91_add_device_spi(eb_spi_devices, ARRAY_SIZE(eb_spi_devices));
- /* MMC */
- at91_add_device_mmc(0, &eb_mmc_data);
- /* Ethernet */
- at91_add_device_eth(&eb_eth_data);
- /* mAgic */
- at91_add_device_mAgic();
-}
-
-MACHINE_START(AT572D940HFEB, "Atmel AT91D940HF-EB")
- /* Maintainer: Atmel <costa.antonior@gmail.com> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
- .timer = &at91sam926x_timer,
- .map_io = eb_map_io,
- .init_irq = eb_init_irq,
- .init_machine = eb_board_init,
-MACHINE_END
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index b54e3e6fceb6..148fccb9a25a 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -45,7 +45,7 @@
#include "generic.h"
-static void __init cam60_map_io(void)
+static void __init cam60_init_early(void)
{
/* Initialize processor: 10 MHz crystal */
at91sam9260_initialize(10000000);
@@ -198,9 +198,9 @@ static void __init cam60_board_init(void)
MACHINE_START(CAM60, "KwikByte CAM60")
/* Maintainer: KwikByte */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = cam60_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = cam60_init_early,
.init_irq = cam60_init_irq,
.init_machine = cam60_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index e7274440ead9..1904fdf87613 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -44,12 +44,13 @@
#include <mach/gpio.h>
#include <mach/at91cap9_matrix.h>
#include <mach/at91sam9_smc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init cap9adk_map_io(void)
+static void __init cap9adk_init_early(void)
{
/* Initialize processor: 12 MHz crystal */
at91cap9_initialize(12000000);
@@ -187,11 +188,6 @@ static struct atmel_nand_data __initdata cap9adk_nand_data = {
// .rdy_pin = ... not connected
.enable_pin = AT91_PIN_PD15,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata cap9adk_nand_smc_config = {
@@ -219,6 +215,7 @@ static void __init cap9adk_add_device_nand(void)
csa = at91_sys_read(AT91_MATRIX_EBICSA);
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
+ cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (cap9adk_nand_data.bus_width_16)
cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -399,9 +396,9 @@ static void __init cap9adk_board_init(void)
MACHINE_START(AT91CAP9ADK, "Atmel AT91CAP9A-DK")
/* Maintainer: Stelian Pop <stelian.pop@leadtechdesign.com> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = cap9adk_map_io,
+ .map_io = at91cap9_map_io,
+ .init_early = cap9adk_init_early,
.init_irq = cap9adk_init_irq,
.init_machine = cap9adk_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-carmeva.c b/arch/arm/mach-at91/board-carmeva.c
index 295e1e77fa60..f36b18687494 100644
--- a/arch/arm/mach-at91/board-carmeva.c
+++ b/arch/arm/mach-at91/board-carmeva.c
@@ -40,10 +40,10 @@
#include "generic.h"
-static void __init carmeva_map_io(void)
+static void __init carmeva_init_early(void)
{
/* Initialize processor: 20.000 MHz crystal */
- at91rm9200_initialize(20000000, AT91RM9200_BGA);
+ at91rm9200_initialize(20000000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -162,9 +162,9 @@ static void __init carmeva_board_init(void)
MACHINE_START(CARMEVA, "Carmeva")
/* Maintainer: Conitec Datasystems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = carmeva_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = carmeva_init_early,
.init_irq = carmeva_init_irq,
.init_machine = carmeva_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c
index 3838594578f3..980511084fe4 100644
--- a/arch/arm/mach-at91/board-cpu9krea.c
+++ b/arch/arm/mach-at91/board-cpu9krea.c
@@ -47,7 +47,7 @@
#include "sam9_smc.h"
#include "generic.h"
-static void __init cpu9krea_map_io(void)
+static void __init cpu9krea_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -375,9 +375,9 @@ MACHINE_START(CPUAT9260, "Eukrea CPU9260")
MACHINE_START(CPUAT9G20, "Eukrea CPU9G20")
#endif
/* Maintainer: Eric Benard - EUKREA Electromatique */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = cpu9krea_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = cpu9krea_init_early,
.init_irq = cpu9krea_init_irq,
.init_machine = cpu9krea_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-cpuat91.c b/arch/arm/mach-at91/board-cpuat91.c
index 2f4dd8cdd484..6daabe3907a1 100644
--- a/arch/arm/mach-at91/board-cpuat91.c
+++ b/arch/arm/mach-at91/board-cpuat91.c
@@ -38,6 +38,7 @@
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
#include "generic.h"
@@ -50,10 +51,13 @@ static struct gpio_led cpuat91_leds[] = {
},
};
-static void __init cpuat91_map_io(void)
+static void __init cpuat91_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -175,9 +179,9 @@ static void __init cpuat91_board_init(void)
MACHINE_START(CPUAT91, "Eukrea")
/* Maintainer: Eric Benard - EUKREA Electromatique */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = cpuat91_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = cpuat91_init_early,
.init_irq = cpuat91_init_irq,
.init_machine = cpuat91_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index 464839dc39bd..d98bcec1dfe0 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -43,10 +43,10 @@
#include "generic.h"
-static void __init csb337_map_io(void)
+static void __init csb337_init_early(void)
{
/* Initialize processor: 3.6864 MHz crystal */
- at91rm9200_initialize(3686400, AT91RM9200_BGA);
+ at91rm9200_initialize(3686400);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);
@@ -257,9 +257,9 @@ static void __init csb337_board_init(void)
MACHINE_START(CSB337, "Cogent CSB337")
/* Maintainer: Bill Gatliff */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = csb337_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = csb337_init_early,
.init_irq = csb337_init_irq,
.init_machine = csb337_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-csb637.c b/arch/arm/mach-at91/board-csb637.c
index 431688c61412..019aab4e20b0 100644
--- a/arch/arm/mach-at91/board-csb637.c
+++ b/arch/arm/mach-at91/board-csb637.c
@@ -40,10 +40,10 @@
#include "generic.h"
-static void __init csb637_map_io(void)
+static void __init csb637_init_early(void)
{
/* Initialize processor: 3.6864 MHz crystal */
- at91rm9200_initialize(3686400, AT91RM9200_BGA);
+ at91rm9200_initialize(3686400);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -138,9 +138,9 @@ static void __init csb637_board_init(void)
MACHINE_START(CSB637, "Cogent CSB637")
/* Maintainer: Bill Gatliff */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = csb637_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = csb637_init_early,
.init_irq = csb637_init_irq,
.init_machine = csb637_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-eb01.c b/arch/arm/mach-at91/board-eb01.c
index d8df59a3426d..d2023f27c652 100644
--- a/arch/arm/mach-at91/board-eb01.c
+++ b/arch/arm/mach-at91/board-eb01.c
@@ -35,7 +35,7 @@ static void __init at91eb01_init_irq(void)
at91x40_init_interrupts(NULL);
}
-static void __init at91eb01_map_io(void)
+static void __init at91eb01_init_early(void)
{
at91x40_initialize(40000000);
}
@@ -43,7 +43,7 @@ static void __init at91eb01_map_io(void)
MACHINE_START(AT91EB01, "Atmel AT91 EB01")
/* Maintainer: Greg Ungerer <gerg@snapgear.com> */
.timer = &at91x40_timer,
+ .init_early = at91eb01_init_early,
.init_irq = at91eb01_init_irq,
- .map_io = at91eb01_map_io,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-eb9200.c b/arch/arm/mach-at91/board-eb9200.c
index 6cf6566ae346..e9484535cbc8 100644
--- a/arch/arm/mach-at91/board-eb9200.c
+++ b/arch/arm/mach-at91/board-eb9200.c
@@ -40,10 +40,10 @@
#include "generic.h"
-static void __init eb9200_map_io(void)
+static void __init eb9200_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -120,9 +120,9 @@ static void __init eb9200_board_init(void)
}
MACHINE_START(ATEB9200, "Embest ATEB9200")
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = eb9200_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = eb9200_init_early,
.init_irq = eb9200_init_irq,
.init_machine = eb9200_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index de2fd04e7c8a..a6f57faa10a7 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -38,14 +38,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init ecb_at91map_io(void)
+static void __init ecb_at91init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PC7, AT91_PIN_PC7);
@@ -168,9 +172,9 @@ static void __init ecb_at91board_init(void)
MACHINE_START(ECBAT91, "emQbit's ECB_AT91")
/* Maintainer: emQbit.com */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = ecb_at91map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = ecb_at91init_early,
.init_irq = ecb_at91init_irq,
.init_machine = ecb_at91board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-eco920.c b/arch/arm/mach-at91/board-eco920.c
index a158a0ce458f..bfc0062d1483 100644
--- a/arch/arm/mach-at91/board-eco920.c
+++ b/arch/arm/mach-at91/board-eco920.c
@@ -26,11 +26,16 @@
#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
+
#include "generic.h"
-static void __init eco920_map_io(void)
+static void __init eco920_init_early(void)
{
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);
@@ -86,21 +91,6 @@ static struct platform_device eco920_flash = {
.num_resources = 1,
};
-static struct resource at91_beeper_resources[] = {
- [0] = {
- .start = AT91RM9200_BASE_TC3,
- .end = AT91RM9200_BASE_TC3 + 0x39,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device at91_beeper = {
- .name = "at91_beeper",
- .id = 0,
- .resource = at91_beeper_resources,
- .num_resources = ARRAY_SIZE(at91_beeper_resources),
-};
-
static struct spi_board_info eco920_spi_devices[] = {
{ /* CAN controller */
.modalias = "tlv5638",
@@ -139,18 +129,14 @@ static void __init eco920_board_init(void)
AT91_SMC_TDF_(1) /* float time */
);
- at91_clock_associate("tc3_clk", &at91_beeper.dev, "at91_beeper");
- at91_set_B_periph(AT91_PIN_PB6, 0);
- platform_device_register(&at91_beeper);
-
at91_add_device_spi(eco920_spi_devices, ARRAY_SIZE(eco920_spi_devices));
}
MACHINE_START(ECO920, "eco920")
/* Maintainer: Sascha Hauer */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = eco920_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = eco920_init_early,
.init_irq = eco920_init_irq,
.init_machine = eco920_board_init,
MACHINE_END
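For context, the dropped at91_clock_associate() call above is exactly what the clkdev conversion later in this diff makes unnecessary: every registered clock now gets a con_id equal to its name. Below is a minimal sketch of how a driver could still obtain the TC3 clock under that scheme; the probe function and its error handling are illustrative, not part of this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch only: "tc3_clk" resolves through the clkdev con_id that
 * at91_clk_add() (added further down in this diff) registers for
 * every clock. */
static int example_probe(struct platform_device *pdev)
{
        struct clk *clk;

        clk = clk_get(NULL, "tc3_clk");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        clk_enable(clk);
        /* ... program the TC3 block ... */
        return 0;
}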
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
index c8a62dc8fa65..466c063b8d21 100644
--- a/arch/arm/mach-at91/board-flexibity.c
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -37,7 +37,7 @@
#include "generic.h"
-static void __init flexibity_map_io(void)
+static void __init flexibity_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -154,9 +154,9 @@ static void __init flexibity_board_init(void)
MACHINE_START(FLEXIBITY, "Flexibity Connect")
/* Maintainer: Maxim Osipov */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = flexibity_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = flexibity_init_early,
.init_irq = flexibity_init_irq,
.init_machine = flexibity_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index dfc7dfe738e4..e2d1dc9eff45 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -57,7 +57,7 @@
*/
-static void __init foxg20_map_io(void)
+static void __init foxg20_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -266,9 +266,9 @@ static void __init foxg20_board_init(void)
MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20")
/* Maintainer: Sergio Tanzilli */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = foxg20_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = foxg20_init_early,
.init_irq = foxg20_init_irq,
.init_machine = foxg20_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
index bc28136ee249..1d4f36b3cb27 100644
--- a/arch/arm/mach-at91/board-gsia18s.c
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -38,9 +38,9 @@
#include "sam9_smc.h"
#include "generic.h"
-static void __init gsia18s_map_io(void)
+static void __init gsia18s_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/*
* USART0 on ttyS1 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI).
@@ -576,9 +576,9 @@ static void __init gsia18s_board_init(void)
}
MACHINE_START(GSIA18S, "GS_IA18_S")
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = gsia18s_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = gsia18s_init_early,
.init_irq = init_irq,
.init_machine = gsia18s_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index d2e1f4ec1fcc..9b003ff744ba 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -35,14 +35,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init kafa_map_io(void)
+static void __init kafa_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* Set up the LEDs */
at91_init_leds(AT91_PIN_PB4, AT91_PIN_PB4);
@@ -94,9 +98,9 @@ static void __init kafa_board_init(void)
MACHINE_START(KAFA, "Sperry-Sun KAFA")
/* Maintainer: Sergei Sharonov */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = kafa_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = kafa_init_early,
.init_irq = kafa_init_irq,
.init_machine = kafa_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c
index a13d2063faff..a813a74b65f9 100644
--- a/arch/arm/mach-at91/board-kb9202.c
+++ b/arch/arm/mach-at91/board-kb9202.c
@@ -36,16 +36,19 @@
#include <mach/board.h>
#include <mach/gpio.h>
-
+#include <mach/cpu.h>
#include <mach/at91rm9200_mc.h>
#include "generic.h"
-static void __init kb9202_map_io(void)
+static void __init kb9202_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 10 MHz crystal */
- at91rm9200_initialize(10000000, AT91RM9200_PQFP);
+ at91rm9200_initialize(10000000);
/* Set up the LEDs */
at91_init_leds(AT91_PIN_PC19, AT91_PIN_PC18);
@@ -136,9 +139,9 @@ static void __init kb9202_board_init(void)
MACHINE_START(KB9200, "KB920x")
/* Maintainer: KwikByte, Inc. */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = kb9202_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = kb9202_init_early,
.init_irq = kb9202_init_irq,
.init_machine = kb9202_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index fe5f1d47e6e2..961e805db68c 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -51,7 +51,7 @@
#include "generic.h"
-static void __init neocore926_map_io(void)
+static void __init neocore926_init_early(void)
{
/* Initialize processor: 20 MHz crystal */
at91sam9263_initialize(20000000);
@@ -387,9 +387,9 @@ static void __init neocore926_board_init(void)
MACHINE_START(NEOCORE926, "ADENEO NEOCORE 926")
/* Maintainer: ADENEO */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = neocore926_map_io,
+ .map_io = at91sam9263_map_io,
+ .init_early = neocore926_init_early,
.init_irq = neocore926_init_irq,
.init_machine = neocore926_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
index feb65787c30b..21a21af25878 100644
--- a/arch/arm/mach-at91/board-pcontrol-g20.c
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -37,9 +37,9 @@
#include "generic.h"
-static void __init pcontrol_g20_map_io(void)
+static void __init pcontrol_g20_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
@@ -222,9 +222,9 @@ static void __init pcontrol_g20_board_init(void)
MACHINE_START(PCONTROL_G20, "PControl G20")
/* Maintainer: pgsellmann@portner-elektronik.at */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = pcontrol_g20_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = pcontrol_g20_init_early,
.init_irq = init_irq,
.init_machine = pcontrol_g20_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index 55dad3a46547..756cc2a745dd 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -43,10 +43,10 @@
#include "generic.h"
-static void __init picotux200_map_io(void)
+static void __init picotux200_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -123,9 +123,9 @@ static void __init picotux200_board_init(void)
MACHINE_START(PICOTUX2XX, "picotux 200")
/* Maintainer: Kleinhenz Elektronik GmbH */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = picotux200_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = picotux200_init_early,
.init_irq = picotux200_init_irq,
.init_machine = picotux200_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 69d15a875b66..d1a6001b0bd8 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -48,7 +48,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
@@ -268,9 +268,9 @@ static void __init ek_board_init(void)
MACHINE_START(QIL_A9260, "CALAO QIL_A9260")
/* Maintainer: calao-systems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index 4c1047c8200d..aef9627710b0 100644
--- a/arch/arm/mach-at91/board-rm9200dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -45,10 +45,10 @@
#include "generic.h"
-static void __init dk_map_io(void)
+static void __init dk_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2);
@@ -227,9 +227,9 @@ static void __init dk_board_init(void)
MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK")
/* Maintainer: SAN People/Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = dk_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = dk_init_early,
.init_irq = dk_init_irq,
.init_machine = dk_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 9df1be8818c0..015a02183080 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -45,10 +45,10 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB1, AT91_PIN_PB2);
@@ -193,9 +193,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91RM9200EK, "Atmel AT91RM9200-EK")
/* Maintainer: SAN People/Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = ek_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index 25a26beaa728..aaf1bf0989b3 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -44,7 +44,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -212,9 +212,9 @@ static void __init ek_board_init(void)
MACHINE_START(SAM9_L9260, "Olimex SAM9-L9260")
/* Maintainer: Olimex */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index de1816e0e1d9..d600dc123227 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -44,12 +44,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -191,11 +192,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -218,6 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -356,9 +353,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9260EK, "Atmel AT91SAM9260-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
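The compile-time CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16 selection is replaced by a runtime query of board_have_nand_8bit(), pulled in via <mach/system_rev.h>. That header is not part of this excerpt; the fragment below is only a sketch of what it could look like, assuming the bootloader encodes the NAND bus width in system_rev (the bit position is an assumption).

/* Sketch of <mach/system_rev.h>; the flag bit is illustrative. */
#ifndef __ARCH_SYSTEM_REV_H__
#define __ARCH_SYSTEM_REV_H__

/* system_rev is filled in from ATAG_REVISION by the boot code */
#define BOARD_HAVE_NAND_8BIT    (1 << 31)

static inline int board_have_nand_8bit(void)
{
        return !!(system_rev & BOARD_HAVE_NAND_8BIT);
}

#endif /* __ARCH_SYSTEM_REV_H__ */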
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index 14acc901e24c..f897f84d43dc 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -48,12 +48,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9261_initialize(18432000);
@@ -197,11 +198,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC15,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -224,6 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -623,9 +620,9 @@ MACHINE_START(AT91SAM9261EK, "Atmel AT91SAM9261-EK")
MACHINE_START(AT91SAM9G10EK, "Atmel AT91SAM9G10-EK")
#endif
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9261_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index bfe490df58be..605b26f40a4c 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -47,12 +47,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 16.367 MHz crystal */
at91sam9263_initialize(16367660);
@@ -198,11 +199,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PA22,
.enable_pin = AT91_PIN_PD15,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -225,6 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -454,9 +451,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9263_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index ca8198b3c168..7624cf0d006b 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -43,6 +43,7 @@
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
@@ -60,7 +61,7 @@ static int inline ek_have_2mmc(void)
}
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -175,11 +176,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -202,6 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -406,18 +403,18 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 6c999dbd2bcf..063c95d0e8f0 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -41,12 +41,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9g45_initialize(12000000);
@@ -155,11 +156,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC8,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -182,6 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -424,9 +421,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9M10G45EK, "Atmel AT91SAM9M10G45-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9g45_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 3bf3408e94c1..effb399a80a6 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -38,7 +38,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9rl_initialize(12000000);
@@ -329,9 +329,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9rl_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index 17f7d9b32142..3eb0a1153cc8 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -40,7 +40,7 @@
#define SNAPPER9260_IO_EXP_GPIO(x) (NR_BUILTIN_GPIO + (x))
-static void __init snapper9260_map_io(void)
+static void __init snapper9260_init_early(void)
{
at91sam9260_initialize(18432000);
@@ -178,9 +178,9 @@ static void __init snapper9260_board_init(void)
}
MACHINE_START(SNAPPER_9260, "Bluewater Systems Snapper 9260/9G20 module")
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = snapper9260_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = snapper9260_init_early,
.init_irq = snapper9260_init_irq,
.init_machine = snapper9260_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index f8902b118960..5e5c85688f5f 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -32,7 +32,7 @@
#include "generic.h"
-void __init stamp9g20_map_io(void)
+void __init stamp9g20_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -44,9 +44,9 @@ void __init stamp9g20_map_io(void)
at91_set_serial_console(0);
}
-static void __init stamp9g20evb_map_io(void)
+static void __init stamp9g20evb_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
@@ -54,9 +54,9 @@ static void __init stamp9g20evb_map_io(void)
| ATMEL_UART_DCD | ATMEL_UART_RI);
}
-static void __init portuxg20_map_io(void)
+static void __init portuxg20_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
@@ -298,18 +298,18 @@ static void __init stamp9g20evb_board_init(void)
MACHINE_START(PORTUXG20, "taskit PortuxG20")
/* Maintainer: taskit GmbH */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = portuxg20_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = portuxg20_init_early,
.init_irq = init_irq,
.init_machine = portuxg20_board_init,
MACHINE_END
MACHINE_START(STAMP9G20, "taskit Stamp9G20")
/* Maintainer: taskit GmbH */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = stamp9g20evb_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = stamp9g20evb_init_early,
.init_irq = init_irq,
.init_machine = stamp9g20evb_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-usb-a9260.c b/arch/arm/mach-at91/board-usb-a9260.c
index 07784baeae84..0e784e6fedec 100644
--- a/arch/arm/mach-at91/board-usb-a9260.c
+++ b/arch/arm/mach-at91/board-usb-a9260.c
@@ -48,7 +48,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
@@ -228,9 +228,9 @@ static void __init ek_board_init(void)
MACHINE_START(USB_A9260, "CALAO USB_A9260")
/* Maintainer: calao-systems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-usb-a9263.c b/arch/arm/mach-at91/board-usb-a9263.c
index b614508931fd..cf626dd14b2c 100644
--- a/arch/arm/mach-at91/board-usb-a9263.c
+++ b/arch/arm/mach-at91/board-usb-a9263.c
@@ -47,7 +47,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.00 MHz crystal */
at91sam9263_initialize(12000000);
@@ -244,9 +244,9 @@ static void __init ek_board_init(void)
MACHINE_START(USB_A9263, "CALAO USB_A9263")
/* Maintainer: calao-systems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9263_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index e0f0080eb639..c208cc334d7d 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -45,14 +45,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init yl9200_map_io(void)
+static void __init yl9200_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs D2=PB17 (timer), D3=PB16 (cpu) */
at91_init_leds(AT91_PIN_PB16, AT91_PIN_PB17);
@@ -594,9 +598,9 @@ static void __init yl9200_board_init(void)
MACHINE_START(YL9200, "uCdragon YL-9200")
/* Maintainer: S.Birtles */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = yl9200_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = yl9200_init_early,
.init_irq = yl9200_init_irq,
.init_machine = yl9200_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index 9113da6845f1..61873f3aa92d 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -163,7 +163,7 @@ static struct clk udpck = {
.parent = &pllb,
.mode = pmc_sys_mode,
};
-static struct clk utmi_clk = {
+struct clk utmi_clk = {
.name = "utmi_clk",
.parent = &main_clk,
.pmc_mask = AT91_PMC_UPLLEN, /* in CKGR_UCKR */
@@ -182,7 +182,7 @@ static struct clk uhpck = {
* memory, interfaces to on-chip peripherals, the AIC, and sometimes more
* (e.g baud rate generation). It's sourced from one of the primary clocks.
*/
-static struct clk mck = {
+struct clk mck = {
.name = "mck",
.pmc_mask = AT91_PMC_MCKRDY, /* in PMC_SR */
};
@@ -215,43 +215,6 @@ static struct clk __init *at91_css_to_clk(unsigned long css)
return NULL;
}
-/*
- * Associate a particular clock with a function (eg, "uart") and device.
- * The drivers can then request the same 'function' with several different
- * devices and not care about which clock name to use.
- */
-void __init at91_clock_associate(const char *id, struct device *dev, const char *func)
-{
- struct clk *clk = clk_get(NULL, id);
-
- if (!dev || !clk || !IS_ERR(clk_get(dev, func)))
- return;
-
- clk->function = func;
- clk->dev = dev;
-}
-
-/* clocks cannot be de-registered no refcounting necessary */
-struct clk *clk_get(struct device *dev, const char *id)
-{
- struct clk *clk;
-
- list_for_each_entry(clk, &clocks, node) {
- if (strcmp(id, clk->name) == 0)
- return clk;
- if (clk->function && (dev == clk->dev) && strcmp(id, clk->function) == 0)
- return clk;
- }
-
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
static void __clk_enable(struct clk *clk)
{
if (clk->parent)
@@ -498,32 +461,38 @@ postcore_initcall(at91_clk_debugfs_init);
/*------------------------------------------------------------------------*/
/* Register a new clock */
+static void __init at91_clk_add(struct clk *clk)
+{
+ list_add_tail(&clk->node, &clocks);
+
+ clk->cl.con_id = clk->name;
+ clk->cl.clk = clk;
+ clkdev_add(&clk->cl);
+}
+
int __init clk_register(struct clk *clk)
{
if (clk_is_peripheral(clk)) {
if (!clk->parent)
clk->parent = &mck;
clk->mode = pmc_periph_mode;
- list_add_tail(&clk->node, &clocks);
}
else if (clk_is_sys(clk)) {
clk->parent = &mck;
clk->mode = pmc_sys_mode;
-
- list_add_tail(&clk->node, &clocks);
}
#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS
else if (clk_is_programmable(clk)) {
clk->mode = pmc_sys_mode;
init_programmable_clock(clk);
- list_add_tail(&clk->node, &clocks);
}
#endif
+ at91_clk_add(clk);
+
return 0;
}
-
/*------------------------------------------------------------------------*/
static u32 __init at91_pll_rate(struct clk *pll, u32 freq, u32 reg)
@@ -630,7 +599,7 @@ static void __init at91_pllb_usbfs_clock_init(unsigned long main_clock)
at91_sys_write(AT91_PMC_SCER, AT91RM9200_PMC_MCKUDP);
} else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() ||
cpu_is_at91sam9263() || cpu_is_at91sam9g20() ||
- cpu_is_at91sam9g10() || cpu_is_at572d940hf()) {
+ cpu_is_at91sam9g10()) {
uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
udpck.pmc_mask = AT91SAM926x_PMC_UDP;
} else if (cpu_is_at91cap9()) {
@@ -754,19 +723,19 @@ int __init at91_clock_init(unsigned long main_clock)
/* Register the PMC's standard clocks */
for (i = 0; i < ARRAY_SIZE(standard_pmc_clocks); i++)
- list_add_tail(&standard_pmc_clocks[i]->node, &clocks);
+ at91_clk_add(standard_pmc_clocks[i]);
if (cpu_has_pllb())
- list_add_tail(&pllb.node, &clocks);
+ at91_clk_add(&pllb);
if (cpu_has_uhp())
- list_add_tail(&uhpck.node, &clocks);
+ at91_clk_add(&uhpck);
if (cpu_has_udpfs())
- list_add_tail(&udpck.node, &clocks);
+ at91_clk_add(&udpck);
if (cpu_has_utmi())
- list_add_tail(&utmi_clk.node, &clocks);
+ at91_clk_add(&utmi_clk);
/* MCK and CPU clock are "always on" */
clk_enable(&mck);
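With clk_register() now calling at91_clk_add() unconditionally, every clock registered by the SoC setup code also gains a clkdev entry. The following is a sketch of how a programmable clock would be defined and registered under this scheme; the clock name is made up, and the field layout assumes the unchanged parts of struct clk (mode/id/type) not shown in this excerpt.

/* Sketch only: illustrative programmable clock, not from this patch. */
static struct clk pck0_example = {
        .name           = "pck0_example",
        .pmc_mask       = AT91_PMC_PCK0,
        .type           = CLK_TYPE_PROGRAMMABLE,
        .id             = 0,
};

static void __init example_register_clocks(void)
{
        /* also creates the clkdev con_id entry via at91_clk_add() */
        clk_register(&pck0_example);
}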
diff --git a/arch/arm/mach-at91/clock.h b/arch/arm/mach-at91/clock.h
index 6cf4b78e175d..c2e63e47dcbe 100644
--- a/arch/arm/mach-at91/clock.h
+++ b/arch/arm/mach-at91/clock.h
@@ -6,6 +6,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/clkdev.h>
+
#define CLK_TYPE_PRIMARY 0x1
#define CLK_TYPE_PLL 0x2
#define CLK_TYPE_PROGRAMMABLE 0x4
@@ -16,8 +18,7 @@
struct clk {
struct list_head node;
const char *name; /* unique clock name */
- const char *function; /* function of the clock */
- struct device *dev; /* device associated with function */
+ struct clk_lookup cl;
unsigned long rate_hz;
struct clk *parent;
u32 pmc_mask;
@@ -29,3 +30,18 @@ struct clk {
extern int __init clk_register(struct clk *clk);
+extern struct clk mck;
+extern struct clk utmi_clk;
+
+#define CLKDEV_CON_ID(_id, _clk) \
+ { \
+ .con_id = _id, \
+ .clk = _clk, \
+ }
+
+#define CLKDEV_CON_DEV_ID(_con_id, _dev_id, _clk) \
+ { \
+ .con_id = _con_id, \
+ .dev_id = _dev_id, \
+ .clk = _clk, \
+ }
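The two new macros build static struct clk_lookup entries, so device bindings that previously went through at91_clock_associate() can be declared as tables in the SoC setup files. A sketch of the intended usage follows; the device names and clock symbols are illustrative, not taken from this excerpt.

#include <linux/clkdev.h>

/* Sketch only: example lookup table built with the new macros. */
static struct clk_lookup example_lookups[] = {
        CLKDEV_CON_ID("pioA_clk", &pioA_clk),
        CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
};

static void __init example_add_lookups(void)
{
        clkdev_add_table(example_lookups, ARRAY_SIZE(example_lookups));
}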
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 0c66deb2db39..8ff3418f3430 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -8,8 +8,21 @@
* published by the Free Software Foundation.
*/
+#include <linux/clkdev.h>
+
+ /* Map io */
+extern void __init at91rm9200_map_io(void);
+extern void __init at91sam9260_map_io(void);
+extern void __init at91sam9261_map_io(void);
+extern void __init at91sam9263_map_io(void);
+extern void __init at91sam9rl_map_io(void);
+extern void __init at91sam9g45_map_io(void);
+extern void __init at91x40_map_io(void);
+extern void __init at91cap9_map_io(void);
+
/* Processors */
-extern void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks);
+extern void __init at91rm9200_set_type(int type);
+extern void __init at91rm9200_initialize(unsigned long main_clock);
extern void __init at91sam9260_initialize(unsigned long main_clock);
extern void __init at91sam9261_initialize(unsigned long main_clock);
extern void __init at91sam9263_initialize(unsigned long main_clock);
@@ -17,7 +30,6 @@ extern void __init at91sam9rl_initialize(unsigned long main_clock);
extern void __init at91sam9g45_initialize(unsigned long main_clock);
extern void __init at91x40_initialize(unsigned long main_clock);
extern void __init at91cap9_initialize(unsigned long main_clock);
-extern void __init at572d940hf_initialize(unsigned long main_clock);
/* Interrupts */
extern void __init at91rm9200_init_interrupts(unsigned int priority[]);
@@ -28,7 +40,6 @@ extern void __init at91sam9rl_init_interrupts(unsigned int priority[]);
extern void __init at91sam9g45_init_interrupts(unsigned int priority[]);
extern void __init at91x40_init_interrupts(unsigned int priority[]);
extern void __init at91cap9_init_interrupts(unsigned int priority[]);
-extern void __init at572d940hf_init_interrupts(unsigned int priority[]);
extern void __init at91_aic_init(unsigned int priority[]);
/* Timer */
@@ -39,8 +50,19 @@ extern struct sys_timer at91x40_timer;
/* Clocks */
extern int __init at91_clock_init(unsigned long main_clock);
+/*
+ * Functions to specify the clock of the default console. As we do not
+ * use the device/driver bus, the dev_name is not initialized, so we
+ * need to link the clock to the specific con_id "usart".
 * Functions to specify the clock of the default console. As we do not
 * use the device/driver bus, the dev_name is not initialized, so we
 * need to link the clock to the specific con_id "usart".
+ */
+extern void __init at91rm9200_set_console_clock(int id);
+extern void __init at91sam9260_set_console_clock(int id);
+extern void __init at91sam9261_set_console_clock(int id);
+extern void __init at91sam9263_set_console_clock(int id);
+extern void __init at91sam9rl_set_console_clock(int id);
+extern void __init at91sam9g45_set_console_clock(int id);
+extern void __init at91cap9_set_console_clock(int id);
struct device;
-extern void __init at91_clock_associate(const char *id, struct device *dev, const char *func);
/* Power Management */
extern void at91_irq_suspend(void);
diff --git a/arch/arm/mach-at91/include/mach/at572d940hf.h b/arch/arm/mach-at91/include/mach/at572d940hf.h
deleted file mode 100644
index be510cfc56be..000000000000
--- a/arch/arm/mach-at91/include/mach/at572d940hf.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * include/mach/at572d940hf.h
- *
- * Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2008 Atmel
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef AT572D940HF_H
-#define AT572D940HF_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
-#define AT572D940HF_ID_PIOA 2 /* Parallel IO Controller A */
-#define AT572D940HF_ID_PIOB 3 /* Parallel IO Controller B */
-#define AT572D940HF_ID_PIOC 4 /* Parallel IO Controller C */
-#define AT572D940HF_ID_EMAC 5 /* MACB ethernet controller */
-#define AT572D940HF_ID_US0 6 /* USART 0 */
-#define AT572D940HF_ID_US1 7 /* USART 1 */
-#define AT572D940HF_ID_US2 8 /* USART 2 */
-#define AT572D940HF_ID_MCI 9 /* Multimedia Card Interface */
-#define AT572D940HF_ID_UDP 10 /* USB Device Port */
-#define AT572D940HF_ID_TWI0 11 /* Two-Wire Interface 0 */
-#define AT572D940HF_ID_SPI0 12 /* Serial Peripheral Interface 0 */
-#define AT572D940HF_ID_SPI1 13 /* Serial Peripheral Interface 1 */
-#define AT572D940HF_ID_SSC0 14 /* Serial Synchronous Controller 0 */
-#define AT572D940HF_ID_SSC1 15 /* Serial Synchronous Controller 1 */
-#define AT572D940HF_ID_SSC2 16 /* Serial Synchronous Controller 2 */
-#define AT572D940HF_ID_TC0 17 /* Timer Counter 0 */
-#define AT572D940HF_ID_TC1 18 /* Timer Counter 1 */
-#define AT572D940HF_ID_TC2 19 /* Timer Counter 2 */
-#define AT572D940HF_ID_UHP 20 /* USB Host port */
-#define AT572D940HF_ID_SSC3 21 /* Serial Synchronous Controller 3 */
-#define AT572D940HF_ID_TWI1 22 /* Two-Wire Interface 1 */
-#define AT572D940HF_ID_CAN0 23 /* CAN Controller 0 */
-#define AT572D940HF_ID_CAN1 24 /* CAN Controller 1 */
-#define AT572D940HF_ID_MHALT 25 /* mAgicV HALT line */
-#define AT572D940HF_ID_MSIRQ0 26 /* mAgicV SIRQ0 line */
-#define AT572D940HF_ID_MEXC 27 /* mAgicV exception line */
-#define AT572D940HF_ID_MEDMA 28 /* mAgicV end of DMA line */
-#define AT572D940HF_ID_IRQ0 29 /* External Interrupt Source (IRQ0) */
-#define AT572D940HF_ID_IRQ1 30 /* External Interrupt Source (IRQ1) */
-#define AT572D940HF_ID_IRQ2 31 /* External Interrupt Source (IRQ2) */
-
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT572D940HF_BASE_TCB 0xfffa0000
-#define AT572D940HF_BASE_TC0 0xfffa0000
-#define AT572D940HF_BASE_TC1 0xfffa0040
-#define AT572D940HF_BASE_TC2 0xfffa0080
-#define AT572D940HF_BASE_UDP 0xfffa4000
-#define AT572D940HF_BASE_MCI 0xfffa8000
-#define AT572D940HF_BASE_TWI0 0xfffac000
-#define AT572D940HF_BASE_US0 0xfffb0000
-#define AT572D940HF_BASE_US1 0xfffb4000
-#define AT572D940HF_BASE_US2 0xfffb8000
-#define AT572D940HF_BASE_SSC0 0xfffbc000
-#define AT572D940HF_BASE_SSC1 0xfffc0000
-#define AT572D940HF_BASE_SSC2 0xfffc4000
-#define AT572D940HF_BASE_SPI0 0xfffc8000
-#define AT572D940HF_BASE_SPI1 0xfffcc000
-#define AT572D940HF_BASE_SSC3 0xfffd0000
-#define AT572D940HF_BASE_TWI1 0xfffd4000
-#define AT572D940HF_BASE_EMAC 0xfffd8000
-#define AT572D940HF_BASE_CAN0 0xfffdc000
-#define AT572D940HF_BASE_CAN1 0xfffe0000
-#define AT91_BASE_SYS 0xffffea00
-
-
-/*
- * System Peripherals (offset from AT91_BASE_SYS)
- */
-#define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS)
-#define AT91_SMC (0xffffec00 - AT91_BASE_SYS)
-#define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_DBGU (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff800 - AT91_BASE_SYS)
-#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
-
-#define AT91_USART0 AT572D940HF_ID_US0
-#define AT91_USART1 AT572D940HF_ID_US1
-#define AT91_USART2 AT572D940HF_ID_US2
-
-
-/*
- * Internal Memory.
- */
-#define AT572D940HF_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define AT572D940HF_SRAM_SIZE (48 * SZ_1K) /* Internal SRAM size (48Kb) */
-
-#define AT572D940HF_ROM_BASE 0x00400000 /* Internal ROM base address */
-#define AT572D940HF_ROM_SIZE SZ_32K /* Internal ROM size (32Kb) */
-
-#define AT572D940HF_UHP_BASE 0x00500000 /* USB Host controller */
-
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h b/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h
deleted file mode 100644
index b6751df09488..000000000000
--- a/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * include/mach//at572d940hf_matrix.h
- *
- * Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2008 Atmel
- *
- * Copyright (C) 2005 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef AT572D940HF_MATRIX_H
-#define AT572D940HF_MATRIX_H
-
-#define AT91_MATRIX_MCFG0 (AT91_MATRIX + 0x00) /* Master Configuration Register 0 */
-#define AT91_MATRIX_MCFG1 (AT91_MATRIX + 0x04) /* Master Configuration Register 1 */
-#define AT91_MATRIX_MCFG2 (AT91_MATRIX + 0x08) /* Master Configuration Register 2 */
-#define AT91_MATRIX_MCFG3 (AT91_MATRIX + 0x0C) /* Master Configuration Register 3 */
-#define AT91_MATRIX_MCFG4 (AT91_MATRIX + 0x10) /* Master Configuration Register 4 */
-#define AT91_MATRIX_MCFG5 (AT91_MATRIX + 0x14) /* Master Configuration Register 5 */
-
-#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
-#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
-#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
-#define AT91_MATRIX_ULBT_FOUR (2 << 0)
-#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
-#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
-
-#define AT91_MATRIX_SCFG0 (AT91_MATRIX + 0x40) /* Slave Configuration Register 0 */
-#define AT91_MATRIX_SCFG1 (AT91_MATRIX + 0x44) /* Slave Configuration Register 1 */
-#define AT91_MATRIX_SCFG2 (AT91_MATRIX + 0x48) /* Slave Configuration Register 2 */
-#define AT91_MATRIX_SCFG3 (AT91_MATRIX + 0x4C) /* Slave Configuration Register 3 */
-#define AT91_MATRIX_SCFG4 (AT91_MATRIX + 0x50) /* Slave Configuration Register 4 */
-#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */
-#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
-#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
-#define AT91_MATRIX_FIXED_DEFMSTR (0x7 << 18) /* Fixed Index of Default Master */
-#define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */
-#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
-#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
-
-#define AT91_MATRIX_PRAS0 (AT91_MATRIX + 0x80) /* Priority Register A for Slave 0 */
-#define AT91_MATRIX_PRAS1 (AT91_MATRIX + 0x88) /* Priority Register A for Slave 1 */
-#define AT91_MATRIX_PRAS2 (AT91_MATRIX + 0x90) /* Priority Register A for Slave 2 */
-#define AT91_MATRIX_PRAS3 (AT91_MATRIX + 0x98) /* Priority Register A for Slave 3 */
-#define AT91_MATRIX_PRAS4 (AT91_MATRIX + 0xA0) /* Priority Register A for Slave 4 */
-
-#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
-#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
-#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
-#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
-#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
-#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
-#define AT91_MATRIX_M6PR (3 << 24) /* Master 6 Priority */
-
-#define AT91_MATRIX_MRCR (AT91_MATRIX + 0x100) /* Master Remap Control Register */
-#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
-#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
-
-#define AT91_MATRIX_SFR0 (AT91_MATRIX + 0x110) /* Special Function Register 0 */
-#define AT91_MATRIX_SFR1 (AT91_MATRIX + 0x114) /* Special Function Register 1 */
-#define AT91_MATRIX_SFR2 (AT91_MATRIX + 0x118) /* Special Function Register 2 */
-#define AT91_MATRIX_SFR3 (AT91_MATRIX + 0x11C) /* Special Function Register 3 */
-#define AT91_MATRIX_SFR4 (AT91_MATRIX + 0x120) /* Special Function Register 4 */
-#define AT91_MATRIX_SFR5 (AT91_MATRIX + 0x124) /* Special Function Register 5 */
-#define AT91_MATRIX_SFR6 (AT91_MATRIX + 0x128) /* Special Function Register 6 */
-#define AT91_MATRIX_SFR7 (AT91_MATRIX + 0x12C) /* Special Function Register 7 */
-#define AT91_MATRIX_SFR8 (AT91_MATRIX + 0x130) /* Special Function Register 8 */
-#define AT91_MATRIX_SFR9 (AT91_MATRIX + 0x134) /* Special Function Register 9 */
-#define AT91_MATRIX_SFR10 (AT91_MATRIX + 0x138) /* Special Function Register 10 */
-#define AT91_MATRIX_SFR11 (AT91_MATRIX + 0x13C) /* Special Function Register 11 */
-#define AT91_MATRIX_SFR12 (AT91_MATRIX + 0x140) /* Special Function Register 12 */
-#define AT91_MATRIX_SFR13 (AT91_MATRIX + 0x144) /* Special Function Register 13 */
-#define AT91_MATRIX_SFR14 (AT91_MATRIX + 0x148) /* Special Function Register 14 */
-#define AT91_MATRIX_SFR15 (AT91_MATRIX + 0x14C) /* Special Function Register 15 */
-
-
-/*
- * The following registers / bits are not defined in the Datasheet (Revision A)
- */
-
-#define AT91_MATRIX_TCR (AT91_MATRIX + 0x100) /* TCM Configuration Register */
-#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
-#define AT91_MATRIX_ITCM_0 (0 << 0)
-#define AT91_MATRIX_ITCM_16 (5 << 0)
-#define AT91_MATRIX_ITCM_32 (6 << 0)
-#define AT91_MATRIX_ITCM_64 (7 << 0)
-#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
-#define AT91_MATRIX_DTCM_0 (0 << 4)
-#define AT91_MATRIX_DTCM_16 (5 << 4)
-#define AT91_MATRIX_DTCM_32 (6 << 4)
-#define AT91_MATRIX_DTCM_64 (7 << 4)
-
-#define AT91_MATRIX_EBICSA (AT91_MATRIX + 0x11C) /* EBI Chip Select Assignment Register */
-#define AT91_MATRIX_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_CS3A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_CS4A (1 << 4) /* Chip Select 4 Assignment */
-#define AT91_MATRIX_CS4A_SMC (0 << 4)
-#define AT91_MATRIX_CS4A_SMC_CF1 (1 << 4)
-#define AT91_MATRIX_CS5A (1 << 5) /* Chip Select 5 Assignment */
-#define AT91_MATRIX_CS5A_SMC (0 << 5)
-#define AT91_MATRIX_CS5A_SMC_CF2 (1 << 5)
-#define AT91_MATRIX_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91cap9.h b/arch/arm/mach-at91/include/mach/at91cap9.h
index 9c6af9737485..665993849a7b 100644
--- a/arch/arm/mach-at91/include/mach/at91cap9.h
+++ b/arch/arm/mach-at91/include/mach/at91cap9.h
@@ -20,8 +20,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91CAP9_ID_PIOABCD 2 /* Parallel IO Controller A, B, C and D */
#define AT91CAP9_ID_MPB0 3 /* MP Block Peripheral 0 */
#define AT91CAP9_ID_MPB1 4 /* MP Block Peripheral 1 */
@@ -123,6 +121,4 @@
#define AT91CAP9_UDPHS_FIFO 0x00600000 /* USB High Speed Device Port */
#define AT91CAP9_UHP_BASE 0x00700000 /* USB Host controller */
-#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6
-
#endif
diff --git a/arch/arm/mach-at91/include/mach/at91rm9200.h b/arch/arm/mach-at91/include/mach/at91rm9200.h
index 78983155a074..99e0f8d02d7b 100644
--- a/arch/arm/mach-at91/include/mach/at91rm9200.h
+++ b/arch/arm/mach-at91/include/mach/at91rm9200.h
@@ -19,8 +19,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripheral */
#define AT91RM9200_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91RM9200_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91RM9200_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9260.h b/arch/arm/mach-at91/include/mach/at91sam9260.h
index 4e79036d3b80..8b6bf835cd73 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9260.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9260.h
@@ -20,8 +20,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91SAM9260_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9260_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9260_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9261.h b/arch/arm/mach-at91/include/mach/at91sam9261.h
index 2b5618518129..eafbddaf523c 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9261.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9261.h
@@ -18,8 +18,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91SAM9261_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9261_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9261_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9263.h b/arch/arm/mach-at91/include/mach/at91sam9263.h
index 2091f1e42d43..e2d348213a7b 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9263.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9263.h
@@ -18,8 +18,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91SAM9263_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9263_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9263_ID_PIOCDE 4 /* Parallel IO Controller C, D and E */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h
index a526869aee37..659304aa73d9 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9g45.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9g45.h
@@ -18,8 +18,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Controller Interrupt */
#define AT91SAM9G45_ID_PIOA 2 /* Parallel I/O Controller A */
#define AT91SAM9G45_ID_PIOB 3 /* Parallel I/O Controller B */
#define AT91SAM9G45_ID_PIOC 4 /* Parallel I/O Controller C */
@@ -131,8 +129,6 @@
#define AT91SAM9G45_EHCI_BASE 0x00800000 /* USB Host controller (EHCI) */
#define AT91SAM9G45_VDEC_BASE 0x00900000 /* Video Decoder Controller */
-#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6
-
#define CONSISTENT_DMA_SIZE SZ_4M
/*
diff --git a/arch/arm/mach-at91/include/mach/at91sam9rl.h b/arch/arm/mach-at91/include/mach/at91sam9rl.h
index 87ba8517ad98..41dbbe61055c 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9rl.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9rl.h
@@ -17,8 +17,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Controller */
#define AT91SAM9RL_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9RL_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9RL_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91x40.h b/arch/arm/mach-at91/include/mach/at91x40.h
index 063ac44a0204..a152ff87e688 100644
--- a/arch/arm/mach-at91/include/mach/at91x40.h
+++ b/arch/arm/mach-at91/include/mach/at91x40.h
@@ -15,8 +15,6 @@
/*
* IRQ list.
*/
-#define AT91_ID_FIQ 0 /* FIQ */
-#define AT91_ID_SYS 1 /* System Peripheral */
#define AT91X40_ID_USART0 2 /* USART port 0 */
#define AT91X40_ID_USART1 3 /* USART port 1 */
#define AT91X40_ID_TC0 4 /* Timer/Counter 0 */
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 2b499eb343a1..ed544a0d5a1d 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -90,7 +90,7 @@ struct at91_eth_data {
extern void __init at91_add_device_eth(struct at91_eth_data *data);
#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9) \
- || defined(CONFIG_ARCH_AT91SAM9G45) || defined(CONFIG_ARCH_AT572D940HF)
+ || defined(CONFIG_ARCH_AT91SAM9G45)
#define eth_platform_data at91_eth_data
#endif
@@ -140,6 +140,7 @@ extern void __init at91_set_serial_console(unsigned portnr);
extern struct platform_device *atmel_default_console_device;
struct atmel_uart_data {
+ int num; /* port num */
short use_dma_tx; /* use transmit DMA? */
short use_dma_rx; /* use receive DMA? */
void __iomem *regs; /* virt. base address, if any */
@@ -203,9 +204,6 @@ extern void __init at91_init_leds(u8 cpu_led, u8 timer_led);
extern void __init at91_gpio_leds(struct gpio_led *leds, int nr);
extern void __init at91_pwm_leds(struct gpio_led *leds, int nr);
- /* AT572D940HF DSP */
-extern void __init at91_add_device_mAgic(void);
-
/* FIXME: this needs a better location, but gets stuff building again */
extern int at91_suspend_entering_slow_clock(void);
diff --git a/arch/arm/mach-at91/include/mach/clkdev.h b/arch/arm/mach-at91/include/mach/clkdev.h
new file mode 100644
index 000000000000..04b37a89801c
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
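The two stubs above are the hooks the common clkdev layer expects from each machine: clk_get() ends up calling __clk_get() on the looked-up clock and fails if it returns 0, and clk_put() calls __clk_put(). Defining them as "always succeed / do nothing" says the at91 clock objects are static and need no reference counting. A minimal consumer-side sketch of the API these stubs sit underneath follows; the "usart" connection id and the surrounding function are illustrative only, not part of the patch.

	#include <linux/kernel.h>
	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_enable_clock(struct device *dev)
	{
		struct clk *clk;

		clk = clk_get(dev, "usart");	/* lookup; __clk_get() is called internally */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_enable(clk);
		pr_info("clock runs at %lu Hz\n", clk_get_rate(clk));
		clk_disable(clk);

		clk_put(clk);			/* __clk_put() is called internally */
		return 0;
	}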
diff --git a/arch/arm/mach-at91/include/mach/cpu.h b/arch/arm/mach-at91/include/mach/cpu.h
index 0700f2125305..df966c2bc2d4 100644
--- a/arch/arm/mach-at91/include/mach/cpu.h
+++ b/arch/arm/mach-at91/include/mach/cpu.h
@@ -34,8 +34,6 @@
#define ARCH_ID_AT91SAM9XE256 0x329a93a0
#define ARCH_ID_AT91SAM9XE512 0x329aa3a0
-#define ARCH_ID_AT572D940HF 0x0e0303e0
-
#define ARCH_ID_AT91M40800 0x14080044
#define ARCH_ID_AT91R40807 0x44080746
#define ARCH_ID_AT91M40807 0x14080745
@@ -90,9 +88,16 @@ static inline unsigned long at91cap9_rev_identify(void)
#endif
#ifdef CONFIG_ARCH_AT91RM9200
+extern int rm9200_type;
+#define ARCH_REVISON_9200_BGA (0 << 0)
+#define ARCH_REVISON_9200_PQFP (1 << 0)
#define cpu_is_at91rm9200() (at91_cpu_identify() == ARCH_ID_AT91RM9200)
+#define cpu_is_at91rm9200_bga() (!cpu_is_at91rm9200_pqfp())
+#define cpu_is_at91rm9200_pqfp() (cpu_is_at91rm9200() && rm9200_type & ARCH_REVISON_9200_PQFP)
#else
#define cpu_is_at91rm9200() (0)
+#define cpu_is_at91rm9200_bga() (0)
+#define cpu_is_at91rm9200_pqfp() (0)
#endif
#ifdef CONFIG_ARCH_AT91SAM9260
@@ -181,12 +186,6 @@ static inline unsigned long at91cap9_rev_identify(void)
#define cpu_is_at91cap9_revC() (0)
#endif
-#ifdef CONFIG_ARCH_AT572D940HF
-#define cpu_is_at572d940hf() (at91_cpu_identify() == ARCH_ID_AT572D940HF)
-#else
-#define cpu_is_at572d940hf() (0)
-#endif
-
/*
* Since this is ARM, we will never run on any AVR32 CPU. But these
* definitions may reduce clutter in common drivers.
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index 3d64a75e3ed5..1008b9fb5074 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -32,13 +32,17 @@
#include <mach/at91cap9.h>
#elif defined(CONFIG_ARCH_AT91X40)
#include <mach/at91x40.h>
-#elif defined(CONFIG_ARCH_AT572D940HF)
-#include <mach/at572d940hf.h>
#else
#error "Unsupported AT91 processor"
#endif
+/*
+ * Peripheral identifiers/interrupts.
+ */
+#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
+#define AT91_ID_SYS 1 /* System Peripherals */
+
#ifdef CONFIG_MMU
/*
* Remap the peripherals from address 0xFFF78000 .. 0xFFFFFFFF
@@ -82,13 +86,6 @@
#define AT91_CHIPSELECT_6 0x70000000
#define AT91_CHIPSELECT_7 0x80000000
-/* SDRAM */
-#ifdef CONFIG_DRAM_BASE
-#define AT91_SDRAM_BASE CONFIG_DRAM_BASE
-#else
-#define AT91_SDRAM_BASE AT91_CHIPSELECT_1
-#endif
-
/* Clocks */
#define AT91_SLOW_CLOCK 32768 /* slow clock */
diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h
index c2cfe5040642..401c207f2f39 100644
--- a/arch/arm/mach-at91/include/mach/memory.h
+++ b/arch/arm/mach-at91/include/mach/memory.h
@@ -23,6 +23,4 @@
#include <mach/hardware.h>
-#define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE)
-
#endif
diff --git a/arch/arm/mach-at91/include/mach/stamp9g20.h b/arch/arm/mach-at91/include/mach/stamp9g20.h
index 6120f9c46d59..f62c0abca4b4 100644
--- a/arch/arm/mach-at91/include/mach/stamp9g20.h
+++ b/arch/arm/mach-at91/include/mach/stamp9g20.h
@@ -1,7 +1,7 @@
#ifndef __MACH_STAMP9G20_H
#define __MACH_STAMP9G20_H
-void stamp9g20_map_io(void);
+void stamp9g20_init_early(void);
void stamp9g20_board_init(void);
#endif
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
new file mode 100644
index 000000000000..b855ee75f72c
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ *
+ * Under GPLv2 only
+ */
+
+#ifndef __ARCH_SYSTEM_REV_H__
+#define __ARCH_SYSTEM_REV_H__
+
+/*
+ * board revision encoding
+ * mach specific
+ * the 16-31 bit are reserved for at91 generic information
+ *
+ * bit 31:
+ * 0 => nand 16 bit
+ * 1 => nand 8 bit
+ */
+#define BOARD_HAVE_NAND_8BIT (1 << 31)
+static int inline board_have_nand_8bit(void)
+{
+ return system_rev & BOARD_HAVE_NAND_8BIT;
+}
+
+#endif /* __ARCH_SYSTEM_REV_H__ */
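The helper above simply tests bit 31 of the system_rev word passed in by the boot loader, so board files can pick the NAND bus width at init time. A hypothetical board-file sketch (not part of this patch; the pin numbers and field values are assumptions made only for illustration):

	static struct atmel_nand_data __initdata ek_nand_data = {
		.ale		= 21,
		.cle		= 22,
		.rdy_pin	= AT91_PIN_PC8,
		.enable_pin	= AT91_PIN_PC14,
	};

	static void __init ek_add_device_nand(void)
	{
		/* boards with BOARD_HAVE_NAND_8BIT set in system_rev use 8-bit NAND */
		ek_nand_data.bus_width_16 = board_have_nand_8bit() ? 0 : 1;
		at91_add_device_nand(&ek_nand_data);
	}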
diff --git a/arch/arm/mach-at91/include/mach/timex.h b/arch/arm/mach-at91/include/mach/timex.h
index 05a6e8af80c4..31ac2d97f14c 100644
--- a/arch/arm/mach-at91/include/mach/timex.h
+++ b/arch/arm/mach-at91/include/mach/timex.h
@@ -82,11 +82,6 @@
#define AT91X40_MASTER_CLOCK 40000000
#define CLOCK_TICK_RATE (AT91X40_MASTER_CLOCK)
-#elif defined(CONFIG_ARCH_AT572D940HF)
-
-#define AT572D940HF_MASTER_CLOCK 80000000
-#define CLOCK_TICK_RATE (AT572D940HF_MASTER_CLOCK/16)
-
#endif
#endif
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index b95b9196deed..133aac405853 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -1055,7 +1055,7 @@ int da850_register_pm(struct platform_device *pdev)
if (!pdata->cpupll_reg_base)
return -ENOMEM;
- pdata->ddrpll_reg_base = ioremap(DA8XX_PLL1_BASE, SZ_4K);
+ pdata->ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K);
if (!pdata->ddrpll_reg_base) {
ret = -ENOMEM;
goto no_ddrpll_mem;
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 58a02dc7b15a..4e66881c7aee 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -24,23 +24,25 @@
#include "clock.h"
#define DA8XX_TPCC_BASE 0x01c00000
-#define DA850_MMCSD1_BASE 0x01e1b000
-#define DA850_TPCC1_BASE 0x01e30000
#define DA8XX_TPTC0_BASE 0x01c08000
#define DA8XX_TPTC1_BASE 0x01c08400
-#define DA850_TPTC2_BASE 0x01e38000
#define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */
#define DA8XX_I2C0_BASE 0x01c22000
-#define DA8XX_RTC_BASE 0x01C23000
+#define DA8XX_RTC_BASE 0x01c23000
+#define DA8XX_MMCSD0_BASE 0x01c40000
+#define DA8XX_SPI0_BASE 0x01c41000
+#define DA830_SPI1_BASE 0x01e12000
+#define DA8XX_LCD_CNTRL_BASE 0x01e13000
+#define DA850_MMCSD1_BASE 0x01e1b000
#define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000
#define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000
#define DA8XX_EMAC_CPGMAC_BASE 0x01e23000
#define DA8XX_EMAC_MDIO_BASE 0x01e24000
-#define DA8XX_GPIO_BASE 0x01e26000
#define DA8XX_I2C1_BASE 0x01e28000
-#define DA8XX_SPI0_BASE 0x01c41000
-#define DA830_SPI1_BASE 0x01e12000
+#define DA850_TPCC1_BASE 0x01e30000
+#define DA850_TPTC2_BASE 0x01e38000
#define DA850_SPI1_BASE 0x01f0e000
+#define DA8XX_DDR2_CTL_BASE 0xb0000000
#define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000
#define DA8XX_EMAC_MOD_REG_OFFSET 0x2000
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 22ebc64bc9d9..8f4f736aa267 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -33,6 +33,9 @@
#define DM365_MMCSD0_BASE 0x01D11000
#define DM365_MMCSD1_BASE 0x01D00000
+/* System control register offsets */
+#define DM64XX_VDD3P3V_PWDN 0x48
+
static struct resource i2c_resources[] = {
{
.start = DAVINCI_I2C_BASE,
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index e4fc1af8500e..ad64da713fc8 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -64,13 +64,9 @@ extern unsigned int da850_max_speed;
#define DA8XX_TIMER64P1_BASE 0x01c21000
#define DA8XX_GPIO_BASE 0x01e26000
#define DA8XX_PSC1_BASE 0x01e27000
-#define DA8XX_LCD_CNTRL_BASE 0x01e13000
-#define DA8XX_PLL1_BASE 0x01e1a000
-#define DA8XX_MMCSD0_BASE 0x01c40000
#define DA8XX_AEMIF_CS2_BASE 0x60000000
#define DA8XX_AEMIF_CS3_BASE 0x62000000
#define DA8XX_AEMIF_CTL_BASE 0x68000000
-#define DA8XX_DDR2_CTL_BASE 0xb0000000
#define DA8XX_ARM_RAM_BASE 0xffff0000
void __init da830_init(void);
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index c45ba1f62a11..414e0b93e741 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -21,9 +21,6 @@
*/
#define DAVINCI_SYSTEM_MODULE_BASE 0x01C40000
-/* System control register offsets */
-#define DM64XX_VDD3P3V_PWDN 0x48
-
/*
* I/O mapping
*/
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index 805196207ce8..b92c1e557145 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -169,9 +169,11 @@ config MACH_NURI
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select S3C_DEV_I2C1
+ select S3C_DEV_I2C3
select S3C_DEV_I2C5
select S5P_DEV_USB_EHCI
select EXYNOS4_SETUP_I2C1
+ select EXYNOS4_SETUP_I2C3
select EXYNOS4_SETUP_I2C5
select EXYNOS4_SETUP_SDHCI
select SAMSUNG_DEV_PWM
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index 777897551e42..a9bb94fabaa7 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -13,9 +13,10 @@ obj- :=
# Core support for EXYNOS4 system
obj-$(CONFIG_CPU_EXYNOS4210) += cpu.o init.o clock.o irq-combiner.o
-obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o gpiolib.o irq-eint.o dma.o
+obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o irq-eint.o dma.o
obj-$(CONFIG_PM) += pm.o sleep.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-exynos4/cpuidle.c b/arch/arm/mach-exynos4/cpuidle.c
new file mode 100644
index 000000000000..bf7e96f2793a
--- /dev/null
+++ b/arch/arm/mach-exynos4/cpuidle.c
@@ -0,0 +1,86 @@
+/* linux/arch/arm/mach-exynos4/cpuidle.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+
+#include <asm/proc-fns.h>
+
+static int exynos4_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_state *state);
+
+static struct cpuidle_state exynos4_cpuidle_set[] = {
+ [0] = {
+ .enter = exynos4_enter_idle,
+ .exit_latency = 1,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "IDLE",
+ .desc = "ARM clock gating(WFI)",
+ },
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device);
+
+static struct cpuidle_driver exynos4_idle_driver = {
+ .name = "exynos4_idle",
+ .owner = THIS_MODULE,
+};
+
+static int exynos4_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ struct timeval before, after;
+ int idle_time;
+
+ local_irq_disable();
+ do_gettimeofday(&before);
+
+ cpu_do_idle();
+
+ do_gettimeofday(&after);
+ local_irq_enable();
+ idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
+ (after.tv_usec - before.tv_usec);
+
+ return idle_time;
+}
+
+static int __init exynos4_init_cpuidle(void)
+{
+ int i, max_cpuidle_state, cpu_id;
+ struct cpuidle_device *device;
+
+ cpuidle_register_driver(&exynos4_idle_driver);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+ device = &per_cpu(exynos4_cpuidle_device, cpu_id);
+ device->cpu = cpu_id;
+
+ device->state_count = (sizeof(exynos4_cpuidle_set) /
+ sizeof(struct cpuidle_state));
+
+ max_cpuidle_state = device->state_count;
+
+ for (i = 0; i < max_cpuidle_state; i++) {
+ memcpy(&device->states[i], &exynos4_cpuidle_set[i],
+ sizeof(struct cpuidle_state));
+ }
+
+ if (cpuidle_register_device(device)) {
+ printk(KERN_ERR "CPUidle register device failed\n,");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+device_initcall(exynos4_init_cpuidle);
diff --git a/arch/arm/mach-exynos4/mach-nuri.c b/arch/arm/mach-exynos4/mach-nuri.c
index bb5d12f43af8..642702bb5b12 100644
--- a/arch/arm/mach-exynos4/mach-nuri.c
+++ b/arch/arm/mach-exynos4/mach-nuri.c
@@ -12,6 +12,7 @@
#include <linux/serial_core.h>
#include <linux/input.h>
#include <linux/i2c.h>
+#include <linux/i2c/atmel_mxt_ts.h>
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
#include <linux/regulator/machine.h>
@@ -32,6 +33,8 @@
#include <plat/sdhci.h>
#include <plat/ehci.h>
#include <plat/clock.h>
+#include <plat/gpio-cfg.h>
+#include <plat/iic.h>
#include <mach/map.h>
@@ -259,6 +262,88 @@ static struct i2c_board_info i2c1_devs[] __initdata = {
/* Gyro, To be updated */
};
+/* TSP */
+static u8 mxt_init_vals[] = {
+ /* MXT_GEN_COMMAND(6) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_GEN_POWER(7) */
+ 0x20, 0xff, 0x32,
+ /* MXT_GEN_ACQUIRE(8) */
+ 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
+ /* MXT_TOUCH_MULTI(9) */
+ 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00,
+ /* MXT_TOUCH_KEYARRAY(15) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00,
+ /* MXT_SPT_GPIOPWM(19) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_PROCI_GRIPFACE(20) */
+ 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
+ 0x0f, 0x0a,
+ /* MXT_PROCG_NOISE(22) */
+ 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
+ 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
+ /* MXT_TOUCH_PROXIMITY(23) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_PROCI_ONETOUCH(24) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_SPT_SELFTEST(25) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ /* MXT_PROCI_TWOTOUCH(27) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_SPT_CTECONFIG(28) */
+ 0x00, 0x00, 0x02, 0x08, 0x10, 0x00,
+};
+
+static struct mxt_platform_data mxt_platform_data = {
+ .config = mxt_init_vals,
+ .config_length = ARRAY_SIZE(mxt_init_vals),
+
+ .x_line = 18,
+ .y_line = 11,
+ .x_size = 1024,
+ .y_size = 600,
+ .blen = 0x1,
+ .threshold = 0x28,
+ .voltage = 2800000, /* 2.8V */
+ .orient = MXT_DIAGONAL_COUNTER,
+ .irqflags = IRQF_TRIGGER_FALLING,
+};
+
+static struct s3c2410_platform_i2c i2c3_data __initdata = {
+ .flags = 0,
+ .bus_num = 3,
+ .slave_addr = 0x10,
+ .frequency = 400 * 1000,
+ .sda_delay = 100,
+};
+
+static struct i2c_board_info i2c3_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("atmel_mxt_ts", 0x4a),
+ .platform_data = &mxt_platform_data,
+ .irq = IRQ_EINT(4),
+ },
+};
+
+static void __init nuri_tsp_init(void)
+{
+ int gpio;
+
+ /* TOUCH_INT: XEINT_4 */
+ gpio = EXYNOS4_GPX0(4);
+ gpio_request(gpio, "TOUCH_INT");
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+}
+
/* GPIO I2C 5 (PMIC) */
static struct i2c_board_info i2c5_devs[] __initdata = {
/* max8997, To be updated */
@@ -283,6 +368,7 @@ static struct platform_device *nuri_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_timer[0],
&s5p_device_ehci,
+ &s3c_device_i2c3,
/* NURI Devices */
&nuri_gpio_keys,
@@ -300,8 +386,11 @@ static void __init nuri_map_io(void)
static void __init nuri_machine_init(void)
{
nuri_sdhci_init();
+ nuri_tsp_init();
i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
+ s3c_i2c3_set_platdata(&i2c3_data);
+ i2c_register_board_info(3, i2c3_devs, ARRAY_SIZE(i2c3_devs));
i2c_register_board_info(5, i2c5_devs, ARRAY_SIZE(i2c5_devs));
nuri_ehci_init();
diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c
index af7b68a6b258..88cc422ee444 100644
--- a/arch/arm/mach-gemini/board-wbd111.c
+++ b/arch/arm/mach-gemini/board-wbd111.c
@@ -84,7 +84,6 @@ static struct sys_timer wbd111_timer = {
.init = gemini_timer_init,
};
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition wbd111_partitions[] = {
{
.name = "RedBoot",
@@ -116,11 +115,7 @@ static struct mtd_partition wbd111_partitions[] = {
.mask_flags = MTD_WRITEABLE,
}
};
-#define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions)
-#else
-#define wbd111_partitions NULL
-#define wbd111_num_partitions 0
-#endif /* CONFIG_MTD_PARTITIONS */
+#define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions)
static void __init wbd111_init(void)
{
diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c
index 99e5bbecf923..3a220347bc88 100644
--- a/arch/arm/mach-gemini/board-wbd222.c
+++ b/arch/arm/mach-gemini/board-wbd222.c
@@ -84,7 +84,6 @@ static struct sys_timer wbd222_timer = {
.init = gemini_timer_init,
};
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition wbd222_partitions[] = {
{
.name = "RedBoot",
@@ -116,11 +115,7 @@ static struct mtd_partition wbd222_partitions[] = {
.mask_flags = MTD_WRITEABLE,
}
};
-#define wbd222_num_partitions ARRAY_SIZE(wbd222_partitions)
-#else
-#define wbd222_partitions NULL
-#define wbd222_num_partitions 0
-#endif /* CONFIG_MTD_PARTITIONS */
+#define wbd222_num_partitions ARRAY_SIZE(wbd222_partitions)
static void __init wbd222_init(void)
{
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 140783386785..dca4f7f9f4f7 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -60,7 +60,6 @@ static struct platform_device ixdp425_flash = {
#if defined(CONFIG_MTD_NAND_PLATFORM) || \
defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
static struct mtd_partition ixdp425_partitions[] = {
@@ -74,7 +73,6 @@ static struct mtd_partition ixdp425_partitions[] = {
.size = MTDPART_SIZ_FULL
},
};
-#endif
static void
ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -103,11 +101,9 @@ static struct platform_nand_data ixdp425_flash_nand_data = {
.nr_chips = 1,
.chip_delay = 30,
.options = NAND_NO_AUTOINCR,
-#ifdef CONFIG_MTD_PARTITIONS
.part_probe_types = part_probes,
.partitions = ixdp425_partitions,
.nr_partitions = ARRAY_SIZE(ixdp425_partitions),
-#endif
},
.ctrl = {
.cmd_ctrl = ixdp425_flash_nand_cmd_ctrl
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
index 5b84bcd30271..b9913234bbf6 100644
--- a/arch/arm/mach-netx/fb.c
+++ b/arch/arm/mach-netx/fb.c
@@ -103,7 +103,6 @@ static struct amba_device fb_device = {
.flags = IORESOURCE_MEM,
},
.irq = { NETX_IRQ_LCD, NO_IRQ },
- .periphid = 0x10112400,
};
int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel)
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index 71f3ea623974..3c5e0f522e9c 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -6,7 +6,6 @@ config MACH_NOMADIK_8815NHK
bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
select NOMADIK_8815
select HAS_MTU
- select NOMADIK_GPIO
endmenu
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 2b00f72e8e36..f6247e71a194 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -23,6 +23,7 @@
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/mmc/host.h>
+#include <linux/power/isp1704_charger.h>
#include <plat/mcspi.h>
#include <plat/board.h>
@@ -53,6 +54,8 @@
#define RX51_FMTX_RESET_GPIO 163
#define RX51_FMTX_IRQ 53
+#define RX51_USB_TRANSCEIVER_RST_GPIO 67
+
/* list all spi devices here */
enum {
RX51_SPI_WL1251,
@@ -111,10 +114,30 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
},
};
+static void rx51_charger_set_power(bool on)
+{
+ gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on);
+}
+
+static struct isp1704_charger_data rx51_charger_data = {
+ .set_power = rx51_charger_set_power,
+};
+
static struct platform_device rx51_charger_device = {
- .name = "isp1704_charger",
+ .name = "isp1704_charger",
+ .dev = {
+ .platform_data = &rx51_charger_data,
+ },
};
+static void __init rx51_charger_init(void)
+{
+ WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
+ GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+
+ platform_device_register(&rx51_charger_device);
+}
+
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#define RX51_GPIO_CAMERA_LENS_COVER 110
@@ -961,6 +984,6 @@ void __init rx51_peripherals_init(void)
if (partition)
omap2_hsmmc_init(mmc);
- platform_device_register(&rx51_charger_device);
+ rx51_charger_init();
}
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 2fc9f94cdd29..cd19309fd3b8 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -153,7 +153,6 @@ config MACH_XCEP
bool "Iskratel Electronics XCEP"
select PXA25x
select MTD
- select MTD_PARTITIONS
select MTD_PHYSMAP
select MTD_CFI_INTELEXT
select MTD_CFI
diff --git a/arch/arm/mach-s3c2410/mach-amlm5900.c b/arch/arm/mach-s3c2410/mach-amlm5900.c
index 44440cbd7620..dabc141243f3 100644
--- a/arch/arm/mach-s3c2410/mach-amlm5900.c
+++ b/arch/arm/mach-s3c2410/mach-amlm5900.c
@@ -58,8 +58,6 @@
#include <plat/cpu.h>
#include <plat/gpio-cfg.h>
-#ifdef CONFIG_MTD_PARTITIONS
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
@@ -113,7 +111,6 @@ static struct platform_device amlm5900_device_nor = {
.num_resources = 1,
.resource = &amlm5900_nor_resource,
};
-#endif
static struct map_desc amlm5900_iodesc[] __initdata = {
};
@@ -158,9 +155,7 @@ static struct platform_device *amlm5900_devices[] __initdata = {
&s3c_device_rtc,
&s3c_device_usbgadget,
&s3c_device_sdi,
-#ifdef CONFIG_MTD_PARTITIONS
&amlm5900_device_nor,
-#endif
};
static void __init amlm5900_map_io(void)
diff --git a/arch/arm/mach-s3c2410/mach-tct_hammer.c b/arch/arm/mach-s3c2410/mach-tct_hammer.c
index a15d0621c22f..43c2b831b9e8 100644
--- a/arch/arm/mach-s3c2410/mach-tct_hammer.c
+++ b/arch/arm/mach-s3c2410/mach-tct_hammer.c
@@ -49,8 +49,6 @@
#include <plat/devs.h>
#include <plat/cpu.h>
-#ifdef CONFIG_MTD_PARTITIONS
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
@@ -91,8 +89,6 @@ static struct platform_device tct_hammer_device_nor = {
.resource = &tct_hammer_nor_resource,
};
-#endif
-
static struct map_desc tct_hammer_iodesc[] __initdata = {
};
@@ -133,9 +129,7 @@ static struct platform_device *tct_hammer_devices[] __initdata = {
&s3c_device_rtc,
&s3c_device_usbgadget,
&s3c_device_sdi,
-#ifdef CONFIG_MTD_PARTITIONS
&tct_hammer_device_nor,
-#endif
};
static void __init tct_hammer_map_io(void)
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 405e62128917..82db072cb836 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -16,7 +16,6 @@
#include <mach/dma.h>
#include <mach/map.h>
-#include <mach/gpio-bank-c.h>
#include <mach/spi-clocks.h>
#include <mach/irqs.h>
@@ -40,23 +39,15 @@ static char *spi_src_clks[] = {
*/
static int s3c64xx_spi_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S3C64XX_GPC(0), S3C64XX_GPC0_SPI_MISO0);
- s3c_gpio_cfgpin(S3C64XX_GPC(1), S3C64XX_GPC1_SPI_CLKO);
- s3c_gpio_cfgpin(S3C64XX_GPC(2), S3C64XX_GPC2_SPI_MOSIO);
- s3c_gpio_setpull(S3C64XX_GPC(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(1), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(2), S3C_GPIO_PULL_UP);
+ base = S3C64XX_GPC(0);
break;
case 1:
- s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C64XX_GPC4_SPI_MISO1);
- s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C64XX_GPC5_SPI_CLK1);
- s3c_gpio_cfgpin(S3C64XX_GPC(6), S3C64XX_GPC6_SPI_MOSI1);
- s3c_gpio_setpull(S3C64XX_GPC(4), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(6), S3C_GPIO_PULL_UP);
+ base = S3C64XX_GPC(4);
break;
default:
@@ -64,6 +55,9 @@ static int s3c64xx_spi_cfg_gpio(struct platform_device *pdev)
return -EINVAL;
}
+ s3c_gpio_cfgall_range(base, 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
return 0;
}
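The change above folds the six per-pin s3c_gpio_cfgpin()/s3c_gpio_setpull() calls into a single s3c_gpio_cfgall_range() call over the three consecutive SPI pins starting at the MISO line. Roughly speaking, the helper behaves like the loop below (an illustrative sketch, not the plat-samsung implementation):

	static void cfgall_range_sketch(unsigned int start, unsigned int nr,
					unsigned int cfg, s3c_gpio_pull_t pull)
	{
		unsigned int pin;

		for (pin = start; pin < start + nr; pin++) {
			s3c_gpio_cfgpin(pin, cfg);	/* pin function, e.g. S3C_GPIO_SFN(2) */
			s3c_gpio_setpull(pin, pull);	/* pull-up/down state */
		}
	}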
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
deleted file mode 100644
index 34212e1a7e81..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank A register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPACON (S3C64XX_GPA_BASE + 0x00)
-#define S3C64XX_GPADAT (S3C64XX_GPA_BASE + 0x04)
-#define S3C64XX_GPAPUD (S3C64XX_GPA_BASE + 0x08)
-#define S3C64XX_GPACONSLP (S3C64XX_GPA_BASE + 0x0c)
-#define S3C64XX_GPAPUDSLP (S3C64XX_GPA_BASE + 0x10)
-
-#define S3C64XX_GPA_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPA_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPA_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPA0_UART_RXD0 (0x02 << 0)
-#define S3C64XX_GPA0_EINT_G1_0 (0x07 << 0)
-
-#define S3C64XX_GPA1_UART_TXD0 (0x02 << 4)
-#define S3C64XX_GPA1_EINT_G1_1 (0x07 << 4)
-
-#define S3C64XX_GPA2_UART_nCTS0 (0x02 << 8)
-#define S3C64XX_GPA2_EINT_G1_2 (0x07 << 8)
-
-#define S3C64XX_GPA3_UART_nRTS0 (0x02 << 12)
-#define S3C64XX_GPA3_EINT_G1_3 (0x07 << 12)
-
-#define S3C64XX_GPA4_UART_RXD1 (0x02 << 16)
-#define S3C64XX_GPA4_EINT_G1_4 (0x07 << 16)
-
-#define S3C64XX_GPA5_UART_TXD1 (0x02 << 20)
-#define S3C64XX_GPA5_EINT_G1_5 (0x07 << 20)
-
-#define S3C64XX_GPA6_UART_nCTS1 (0x02 << 24)
-#define S3C64XX_GPA6_EINT_G1_6 (0x07 << 24)
-
-#define S3C64XX_GPA7_UART_nRTS1 (0x02 << 28)
-#define S3C64XX_GPA7_EINT_G1_7 (0x07 << 28)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
deleted file mode 100644
index 7232c037e642..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank B register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPBCON (S3C64XX_GPB_BASE + 0x00)
-#define S3C64XX_GPBDAT (S3C64XX_GPB_BASE + 0x04)
-#define S3C64XX_GPBPUD (S3C64XX_GPB_BASE + 0x08)
-#define S3C64XX_GPBCONSLP (S3C64XX_GPB_BASE + 0x0c)
-#define S3C64XX_GPBPUDSLP (S3C64XX_GPB_BASE + 0x10)
-
-#define S3C64XX_GPB_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPB_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPB_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPB0_UART_RXD2 (0x02 << 0)
-#define S3C64XX_GPB0_EXTDMA_REQ (0x03 << 0)
-#define S3C64XX_GPB0_IrDA_RXD (0x04 << 0)
-#define S3C64XX_GPB0_ADDR_CF0 (0x05 << 0)
-#define S3C64XX_GPB0_EINT_G1_8 (0x07 << 0)
-
-#define S3C64XX_GPB1_UART_TXD2 (0x02 << 4)
-#define S3C64XX_GPB1_EXTDMA_ACK (0x03 << 4)
-#define S3C64XX_GPB1_IrDA_TXD (0x04 << 4)
-#define S3C64XX_GPB1_ADDR_CF1 (0x05 << 4)
-#define S3C64XX_GPB1_EINT_G1_9 (0x07 << 4)
-
-#define S3C64XX_GPB2_UART_RXD3 (0x02 << 8)
-#define S3C64XX_GPB2_IrDA_RXD (0x03 << 8)
-#define S3C64XX_GPB2_EXTDMA_REQ (0x04 << 8)
-#define S3C64XX_GPB2_ADDR_CF2 (0x05 << 8)
-#define S3C64XX_GPB2_I2C_SCL1 (0x06 << 8)
-#define S3C64XX_GPB2_EINT_G1_10 (0x07 << 8)
-
-#define S3C64XX_GPB3_UART_TXD3 (0x02 << 12)
-#define S3C64XX_GPB3_IrDA_TXD (0x03 << 12)
-#define S3C64XX_GPB3_EXTDMA_ACK (0x04 << 12)
-#define S3C64XX_GPB3_I2C_SDA1 (0x06 << 12)
-#define S3C64XX_GPB3_EINT_G1_11 (0x07 << 12)
-
-#define S3C64XX_GPB4_IrDA_SDBW (0x02 << 16)
-#define S3C64XX_GPB4_CAM_FIELD (0x03 << 16)
-#define S3C64XX_GPB4_CF_DATA_DIR (0x04 << 16)
-#define S3C64XX_GPB4_EINT_G1_12 (0x07 << 16)
-
-#define S3C64XX_GPB5_I2C_SCL0 (0x02 << 20)
-#define S3C64XX_GPB5_EINT_G1_13 (0x07 << 20)
-
-#define S3C64XX_GPB6_I2C_SDA0 (0x02 << 24)
-#define S3C64XX_GPB6_EINT_G1_14 (0x07 << 24)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
deleted file mode 100644
index db189ab1639a..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank C register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPCCON (S3C64XX_GPC_BASE + 0x00)
-#define S3C64XX_GPCDAT (S3C64XX_GPC_BASE + 0x04)
-#define S3C64XX_GPCPUD (S3C64XX_GPC_BASE + 0x08)
-#define S3C64XX_GPCCONSLP (S3C64XX_GPC_BASE + 0x0c)
-#define S3C64XX_GPCPUDSLP (S3C64XX_GPC_BASE + 0x10)
-
-#define S3C64XX_GPC_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPC_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPC_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPC0_SPI_MISO0 (0x02 << 0)
-#define S3C64XX_GPC0_EINT_G2_0 (0x07 << 0)
-
-#define S3C64XX_GPC1_SPI_CLKO (0x02 << 4)
-#define S3C64XX_GPC1_EINT_G2_1 (0x07 << 4)
-
-#define S3C64XX_GPC2_SPI_MOSIO (0x02 << 8)
-#define S3C64XX_GPC2_EINT_G2_2 (0x07 << 8)
-
-#define S3C64XX_GPC3_SPI_nCSO (0x02 << 12)
-#define S3C64XX_GPC3_EINT_G2_3 (0x07 << 12)
-
-#define S3C64XX_GPC4_SPI_MISO1 (0x02 << 16)
-#define S3C64XX_GPC4_MMC2_CMD (0x03 << 16)
-#define S3C64XX_GPC4_I2S_V40_DO0 (0x05 << 16)
-#define S3C64XX_GPC4_EINT_G2_4 (0x07 << 16)
-
-#define S3C64XX_GPC5_SPI_CLK1 (0x02 << 20)
-#define S3C64XX_GPC5_MMC2_CLK (0x03 << 20)
-#define S3C64XX_GPC5_I2S_V40_DO1 (0x05 << 20)
-#define S3C64XX_GPC5_EINT_G2_5 (0x07 << 20)
-
-#define S3C64XX_GPC6_SPI_MOSI1 (0x02 << 24)
-#define S3C64XX_GPC6_EINT_G2_6 (0x07 << 24)
-
-#define S3C64XX_GPC7_SPI_nCS1 (0x02 << 28)
-#define S3C64XX_GPC7_I2S_V40_DO2 (0x05 << 28)
-#define S3C64XX_GPC7_EINT_G2_7 (0x07 << 28)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
deleted file mode 100644
index 1a01cee7aca3..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank D register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPDCON (S3C64XX_GPD_BASE + 0x00)
-#define S3C64XX_GPDDAT (S3C64XX_GPD_BASE + 0x04)
-#define S3C64XX_GPDPUD (S3C64XX_GPD_BASE + 0x08)
-#define S3C64XX_GPDCONSLP (S3C64XX_GPD_BASE + 0x0c)
-#define S3C64XX_GPDPUDSLP (S3C64XX_GPD_BASE + 0x10)
-
-#define S3C64XX_GPD_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPD_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPD_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPD0_PCM0_SCLK (0x02 << 0)
-#define S3C64XX_GPD0_I2S0_CLK (0x03 << 0)
-#define S3C64XX_GPD0_AC97_BITCLK (0x04 << 0)
-#define S3C64XX_GPD0_EINT_G3_0 (0x07 << 0)
-
-#define S3C64XX_GPD1_PCM0_EXTCLK (0x02 << 4)
-#define S3C64XX_GPD1_I2S0_CDCLK (0x03 << 4)
-#define S3C64XX_GPD1_AC97_nRESET (0x04 << 4)
-#define S3C64XX_GPD1_EINT_G3_1 (0x07 << 4)
-
-#define S3C64XX_GPD2_PCM0_FSYNC (0x02 << 8)
-#define S3C64XX_GPD2_I2S0_LRCLK (0x03 << 8)
-#define S3C64XX_GPD2_AC97_SYNC (0x04 << 8)
-#define S3C64XX_GPD2_EINT_G3_2 (0x07 << 8)
-
-#define S3C64XX_GPD3_PCM0_SIN (0x02 << 12)
-#define S3C64XX_GPD3_I2S0_DI (0x03 << 12)
-#define S3C64XX_GPD3_AC97_SDI (0x04 << 12)
-#define S3C64XX_GPD3_EINT_G3_3 (0x07 << 12)
-
-#define S3C64XX_GPD4_PCM0_SOUT (0x02 << 16)
-#define S3C64XX_GPD4_I2S0_D0 (0x03 << 16)
-#define S3C64XX_GPD4_AC97_SDO (0x04 << 16)
-#define S3C64XX_GPD4_EINT_G3_4 (0x07 << 16)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
deleted file mode 100644
index f057adb627dd..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank E register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPECON (S3C64XX_GPE_BASE + 0x00)
-#define S3C64XX_GPEDAT (S3C64XX_GPE_BASE + 0x04)
-#define S3C64XX_GPEPUD (S3C64XX_GPE_BASE + 0x08)
-#define S3C64XX_GPECONSLP (S3C64XX_GPE_BASE + 0x0c)
-#define S3C64XX_GPEPUDSLP (S3C64XX_GPE_BASE + 0x10)
-
-#define S3C64XX_GPE_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPE_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPE_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPE0_PCM1_SCLK (0x02 << 0)
-#define S3C64XX_GPE0_I2S1_CLK (0x03 << 0)
-#define S3C64XX_GPE0_AC97_BITCLK (0x04 << 0)
-
-#define S3C64XX_GPE1_PCM1_EXTCLK (0x02 << 4)
-#define S3C64XX_GPE1_I2S1_CDCLK (0x03 << 4)
-#define S3C64XX_GPE1_AC97_nRESET (0x04 << 4)
-
-#define S3C64XX_GPE2_PCM1_FSYNC (0x02 << 8)
-#define S3C64XX_GPE2_I2S1_LRCLK (0x03 << 8)
-#define S3C64XX_GPE2_AC97_SYNC (0x04 << 8)
-
-#define S3C64XX_GPE3_PCM1_SIN (0x02 << 12)
-#define S3C64XX_GPE3_I2S1_DI (0x03 << 12)
-#define S3C64XX_GPE3_AC97_SDI (0x04 << 12)
-
-#define S3C64XX_GPE4_PCM1_SOUT (0x02 << 16)
-#define S3C64XX_GPE4_I2S1_D0 (0x03 << 16)
-#define S3C64XX_GPE4_AC97_SDO (0x04 << 16)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
deleted file mode 100644
index 62ab8f5e7835..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank F register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPFCON (S3C64XX_GPF_BASE + 0x00)
-#define S3C64XX_GPFDAT (S3C64XX_GPF_BASE + 0x04)
-#define S3C64XX_GPFPUD (S3C64XX_GPF_BASE + 0x08)
-#define S3C64XX_GPFCONSLP (S3C64XX_GPF_BASE + 0x0c)
-#define S3C64XX_GPFPUDSLP (S3C64XX_GPF_BASE + 0x10)
-
-#define S3C64XX_GPF_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPF_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPF_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPF0_CAMIF_CLK (0x02 << 0)
-#define S3C64XX_GPF0_EINT_G4_0 (0x03 << 0)
-
-#define S3C64XX_GPF1_CAMIF_HREF (0x02 << 2)
-#define S3C64XX_GPF1_EINT_G4_1 (0x03 << 2)
-
-#define S3C64XX_GPF2_CAMIF_PCLK (0x02 << 4)
-#define S3C64XX_GPF2_EINT_G4_2 (0x03 << 4)
-
-#define S3C64XX_GPF3_CAMIF_nRST (0x02 << 6)
-#define S3C64XX_GPF3_EINT_G4_3 (0x03 << 6)
-
-#define S3C64XX_GPF4_CAMIF_VSYNC (0x02 << 8)
-#define S3C64XX_GPF4_EINT_G4_4 (0x03 << 8)
-
-#define S3C64XX_GPF5_CAMIF_YDATA0 (0x02 << 10)
-#define S3C64XX_GPF5_EINT_G4_5 (0x03 << 10)
-
-#define S3C64XX_GPF6_CAMIF_YDATA1 (0x02 << 12)
-#define S3C64XX_GPF6_EINT_G4_6 (0x03 << 12)
-
-#define S3C64XX_GPF7_CAMIF_YDATA2 (0x02 << 14)
-#define S3C64XX_GPF7_EINT_G4_7 (0x03 << 14)
-
-#define S3C64XX_GPF8_CAMIF_YDATA3 (0x02 << 16)
-#define S3C64XX_GPF8_EINT_G4_8 (0x03 << 16)
-
-#define S3C64XX_GPF9_CAMIF_YDATA4 (0x02 << 18)
-#define S3C64XX_GPF9_EINT_G4_9 (0x03 << 18)
-
-#define S3C64XX_GPF10_CAMIF_YDATA5 (0x02 << 20)
-#define S3C64XX_GPF10_EINT_G4_10 (0x03 << 20)
-
-#define S3C64XX_GPF11_CAMIF_YDATA6 (0x02 << 22)
-#define S3C64XX_GPF11_EINT_G4_11 (0x03 << 22)
-
-#define S3C64XX_GPF12_CAMIF_YDATA7 (0x02 << 24)
-#define S3C64XX_GPF12_EINT_G4_12 (0x03 << 24)
-
-#define S3C64XX_GPF13_PWM_ECLK (0x02 << 26)
-#define S3C64XX_GPF13_EINT_G4_13 (0x03 << 26)
-
-#define S3C64XX_GPF14_PWM_TOUT0 (0x02 << 28)
-#define S3C64XX_GPF14_CLKOUT0 (0x03 << 28)
-
-#define S3C64XX_GPF15_PWM_TOUT1 (0x02 << 30)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
deleted file mode 100644
index b94954af1598..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank G register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPGCON (S3C64XX_GPG_BASE + 0x00)
-#define S3C64XX_GPGDAT (S3C64XX_GPG_BASE + 0x04)
-#define S3C64XX_GPGPUD (S3C64XX_GPG_BASE + 0x08)
-#define S3C64XX_GPGCONSLP (S3C64XX_GPG_BASE + 0x0c)
-#define S3C64XX_GPGPUDSLP (S3C64XX_GPG_BASE + 0x10)
-
-#define S3C64XX_GPG_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPG_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPG_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPG0_MMC0_CLK (0x02 << 0)
-#define S3C64XX_GPG0_EINT_G5_0 (0x07 << 0)
-
-#define S3C64XX_GPG1_MMC0_CMD (0x02 << 4)
-#define S3C64XX_GPG1_EINT_G5_1 (0x07 << 4)
-
-#define S3C64XX_GPG2_MMC0_DATA0 (0x02 << 8)
-#define S3C64XX_GPG2_EINT_G5_2 (0x07 << 8)
-
-#define S3C64XX_GPG3_MMC0_DATA1 (0x02 << 12)
-#define S3C64XX_GPG3_EINT_G5_3 (0x07 << 12)
-
-#define S3C64XX_GPG4_MMC0_DATA2 (0x02 << 16)
-#define S3C64XX_GPG4_EINT_G5_4 (0x07 << 16)
-
-#define S3C64XX_GPG5_MMC0_DATA3 (0x02 << 20)
-#define S3C64XX_GPG5_EINT_G5_5 (0x07 << 20)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
deleted file mode 100644
index 5d75aaad865e..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank H register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPHCON0 (S3C64XX_GPH_BASE + 0x00)
-#define S3C64XX_GPHCON1 (S3C64XX_GPH_BASE + 0x04)
-#define S3C64XX_GPHDAT (S3C64XX_GPH_BASE + 0x08)
-#define S3C64XX_GPHPUD (S3C64XX_GPH_BASE + 0x0c)
-#define S3C64XX_GPHCONSLP (S3C64XX_GPH_BASE + 0x10)
-#define S3C64XX_GPHPUDSLP (S3C64XX_GPH_BASE + 0x14)
-
-#define S3C64XX_GPH_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPH_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPH_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPH0_MMC1_CLK (0x02 << 0)
-#define S3C64XX_GPH0_KP_COL0 (0x04 << 0)
-#define S3C64XX_GPH0_EINT_G6_0 (0x07 << 0)
-
-#define S3C64XX_GPH1_MMC1_CMD (0x02 << 4)
-#define S3C64XX_GPH1_KP_COL1 (0x04 << 4)
-#define S3C64XX_GPH1_EINT_G6_1 (0x07 << 4)
-
-#define S3C64XX_GPH2_MMC1_DATA0 (0x02 << 8)
-#define S3C64XX_GPH2_KP_COL2 (0x04 << 8)
-#define S3C64XX_GPH2_EINT_G6_2 (0x07 << 8)
-
-#define S3C64XX_GPH3_MMC1_DATA1 (0x02 << 12)
-#define S3C64XX_GPH3_KP_COL3 (0x04 << 12)
-#define S3C64XX_GPH3_EINT_G6_3 (0x07 << 12)
-
-#define S3C64XX_GPH4_MMC1_DATA2 (0x02 << 16)
-#define S3C64XX_GPH4_KP_COL4 (0x04 << 16)
-#define S3C64XX_GPH4_EINT_G6_4 (0x07 << 16)
-
-#define S3C64XX_GPH5_MMC1_DATA3 (0x02 << 20)
-#define S3C64XX_GPH5_KP_COL5 (0x04 << 20)
-#define S3C64XX_GPH5_EINT_G6_5 (0x07 << 20)
-
-#define S3C64XX_GPH6_MMC1_DATA4 (0x02 << 24)
-#define S3C64XX_GPH6_MMC2_DATA0 (0x03 << 24)
-#define S3C64XX_GPH6_KP_COL6 (0x04 << 24)
-#define S3C64XX_GPH6_I2S_V40_BCLK (0x05 << 24)
-#define S3C64XX_GPH6_ADDR_CF0 (0x06 << 24)
-#define S3C64XX_GPH6_EINT_G6_6 (0x07 << 24)
-
-#define S3C64XX_GPH7_MMC1_DATA5 (0x02 << 28)
-#define S3C64XX_GPH7_MMC2_DATA1 (0x03 << 28)
-#define S3C64XX_GPH7_KP_COL7 (0x04 << 28)
-#define S3C64XX_GPH7_I2S_V40_CDCLK (0x05 << 28)
-#define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28)
-#define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28)
-
-#define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 0)
-#define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 0)
-#define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 0)
-#define S3C64XX_GPH8_ADDR_CF2 (0x06 << 0)
-#define S3C64XX_GPH8_EINT_G6_8 (0x07 << 0)
-
-#define S3C64XX_GPH9_OUTPUT (0x01 << 4)
-#define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 4)
-#define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 4)
-#define S3C64XX_GPH9_I2S_V40_DI (0x05 << 4)
-#define S3C64XX_GPH9_EINT_G6_9 (0x07 << 4)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
deleted file mode 100644
index 4ceaa6098bc7..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank I register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPICON (S3C64XX_GPI_BASE + 0x00)
-#define S3C64XX_GPIDAT (S3C64XX_GPI_BASE + 0x04)
-#define S3C64XX_GPIPUD (S3C64XX_GPI_BASE + 0x08)
-#define S3C64XX_GPICONSLP (S3C64XX_GPI_BASE + 0x0c)
-#define S3C64XX_GPIPUDSLP (S3C64XX_GPI_BASE + 0x10)
-
-#define S3C64XX_GPI_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPI_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPI_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPI0_VD0 (0x02 << 0)
-#define S3C64XX_GPI1_VD1 (0x02 << 2)
-#define S3C64XX_GPI2_VD2 (0x02 << 4)
-#define S3C64XX_GPI3_VD3 (0x02 << 6)
-#define S3C64XX_GPI4_VD4 (0x02 << 8)
-#define S3C64XX_GPI5_VD5 (0x02 << 10)
-#define S3C64XX_GPI6_VD6 (0x02 << 12)
-#define S3C64XX_GPI7_VD7 (0x02 << 14)
-#define S3C64XX_GPI8_VD8 (0x02 << 16)
-#define S3C64XX_GPI9_VD9 (0x02 << 18)
-#define S3C64XX_GPI10_VD10 (0x02 << 20)
-#define S3C64XX_GPI11_VD11 (0x02 << 22)
-#define S3C64XX_GPI12_VD12 (0x02 << 24)
-#define S3C64XX_GPI13_VD13 (0x02 << 26)
-#define S3C64XX_GPI14_VD14 (0x02 << 28)
-#define S3C64XX_GPI15_VD15 (0x02 << 30)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
deleted file mode 100644
index 6f25cd079a40..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank J register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPJCON (S3C64XX_GPJ_BASE + 0x00)
-#define S3C64XX_GPJDAT (S3C64XX_GPJ_BASE + 0x04)
-#define S3C64XX_GPJPUD (S3C64XX_GPJ_BASE + 0x08)
-#define S3C64XX_GPJCONSLP (S3C64XX_GPJ_BASE + 0x0c)
-#define S3C64XX_GPJPUDSLP (S3C64XX_GPJ_BASE + 0x10)
-
-#define S3C64XX_GPJ_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPJ_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPJ_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPJ0_VD16 (0x02 << 0)
-#define S3C64XX_GPJ1_VD17 (0x02 << 2)
-#define S3C64XX_GPJ2_VD18 (0x02 << 4)
-#define S3C64XX_GPJ3_VD19 (0x02 << 6)
-#define S3C64XX_GPJ4_VD20 (0x02 << 8)
-#define S3C64XX_GPJ5_VD21 (0x02 << 10)
-#define S3C64XX_GPJ6_VD22 (0x02 << 12)
-#define S3C64XX_GPJ7_VD23 (0x02 << 14)
-#define S3C64XX_GPJ8_LCD_HSYNC (0x02 << 16)
-#define S3C64XX_GPJ9_LCD_VSYNC (0x02 << 18)
-#define S3C64XX_GPJ10_LCD_VDEN (0x02 << 20)
-#define S3C64XX_GPJ11_LCD_VCLK (0x02 << 22)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
deleted file mode 100644
index d0aeda1cd9de..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank N register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPNCON (S3C64XX_GPN_BASE + 0x00)
-#define S3C64XX_GPNDAT (S3C64XX_GPN_BASE + 0x04)
-#define S3C64XX_GPNPUD (S3C64XX_GPN_BASE + 0x08)
-
-#define S3C64XX_GPN_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPN_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPN_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPN0_EINT0 (0x02 << 0)
-#define S3C64XX_GPN0_KP_ROW0 (0x03 << 0)
-
-#define S3C64XX_GPN1_EINT1 (0x02 << 2)
-#define S3C64XX_GPN1_KP_ROW1 (0x03 << 2)
-
-#define S3C64XX_GPN2_EINT2 (0x02 << 4)
-#define S3C64XX_GPN2_KP_ROW2 (0x03 << 4)
-
-#define S3C64XX_GPN3_EINT3 (0x02 << 6)
-#define S3C64XX_GPN3_KP_ROW3 (0x03 << 6)
-
-#define S3C64XX_GPN4_EINT4 (0x02 << 8)
-#define S3C64XX_GPN4_KP_ROW4 (0x03 << 8)
-
-#define S3C64XX_GPN5_EINT5 (0x02 << 10)
-#define S3C64XX_GPN5_KP_ROW5 (0x03 << 10)
-
-#define S3C64XX_GPN6_EINT6 (0x02 << 12)
-#define S3C64XX_GPN6_KP_ROW6 (0x03 << 12)
-
-#define S3C64XX_GPN7_EINT7 (0x02 << 14)
-#define S3C64XX_GPN7_KP_ROW7 (0x03 << 14)
-
-#define S3C64XX_GPN8_EINT8 (0x02 << 16)
-#define S3C64XX_GPN9_EINT9 (0x02 << 18)
-#define S3C64XX_GPN10_EINT10 (0x02 << 20)
-#define S3C64XX_GPN11_EINT11 (0x02 << 22)
-#define S3C64XX_GPN12_EINT12 (0x02 << 24)
-#define S3C64XX_GPN13_EINT13 (0x02 << 26)
-#define S3C64XX_GPN14_EINT14 (0x02 << 28)
-#define S3C64XX_GPN15_EINT15 (0x02 << 30)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
deleted file mode 100644
index 21868fa102d0..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank O register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPOCON (S3C64XX_GPO_BASE + 0x00)
-#define S3C64XX_GPODAT (S3C64XX_GPO_BASE + 0x04)
-#define S3C64XX_GPOPUD (S3C64XX_GPO_BASE + 0x08)
-#define S3C64XX_GPOCONSLP (S3C64XX_GPO_BASE + 0x0c)
-#define S3C64XX_GPOPUDSLP (S3C64XX_GPO_BASE + 0x10)
-
-#define S3C64XX_GPO_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPO_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPO_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPO0_MEM0_nCS2 (0x02 << 0)
-#define S3C64XX_GPO0_EINT_G7_0 (0x03 << 0)
-
-#define S3C64XX_GPO1_MEM0_nCS3 (0x02 << 2)
-#define S3C64XX_GPO1_EINT_G7_1 (0x03 << 2)
-
-#define S3C64XX_GPO2_MEM0_nCS4 (0x02 << 4)
-#define S3C64XX_GPO2_EINT_G7_2 (0x03 << 4)
-
-#define S3C64XX_GPO3_MEM0_nCS5 (0x02 << 6)
-#define S3C64XX_GPO3_EINT_G7_3 (0x03 << 6)
-
-#define S3C64XX_GPO4_EINT_G7_4 (0x03 << 8)
-
-#define S3C64XX_GPO5_EINT_G7_5 (0x03 << 10)
-
-#define S3C64XX_GPO6_MEM0_ADDR6 (0x02 << 12)
-#define S3C64XX_GPO6_EINT_G7_6 (0x03 << 12)
-
-#define S3C64XX_GPO7_MEM0_ADDR7 (0x02 << 14)
-#define S3C64XX_GPO7_EINT_G7_7 (0x03 << 14)
-
-#define S3C64XX_GPO8_MEM0_ADDR8 (0x02 << 16)
-#define S3C64XX_GPO8_EINT_G7_8 (0x03 << 16)
-
-#define S3C64XX_GPO9_MEM0_ADDR9 (0x02 << 18)
-#define S3C64XX_GPO9_EINT_G7_9 (0x03 << 18)
-
-#define S3C64XX_GPO10_MEM0_ADDR10 (0x02 << 20)
-#define S3C64XX_GPO10_EINT_G7_10 (0x03 << 20)
-
-#define S3C64XX_GPO11_MEM0_ADDR11 (0x02 << 22)
-#define S3C64XX_GPO11_EINT_G7_11 (0x03 << 22)
-
-#define S3C64XX_GPO12_MEM0_ADDR12 (0x02 << 24)
-#define S3C64XX_GPO12_EINT_G7_12 (0x03 << 24)
-
-#define S3C64XX_GPO13_MEM0_ADDR13 (0x02 << 26)
-#define S3C64XX_GPO13_EINT_G7_13 (0x03 << 26)
-
-#define S3C64XX_GPO14_MEM0_ADDR14 (0x02 << 28)
-#define S3C64XX_GPO14_EINT_G7_14 (0x03 << 28)
-
-#define S3C64XX_GPO15_MEM0_ADDR15 (0x02 << 30)
-#define S3C64XX_GPO15_EINT_G7_15 (0x03 << 30)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
deleted file mode 100644
index 46bcfb63b8de..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank P register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPPCON (S3C64XX_GPP_BASE + 0x00)
-#define S3C64XX_GPPDAT (S3C64XX_GPP_BASE + 0x04)
-#define S3C64XX_GPPPUD (S3C64XX_GPP_BASE + 0x08)
-#define S3C64XX_GPPCONSLP (S3C64XX_GPP_BASE + 0x0c)
-#define S3C64XX_GPPPUDSLP (S3C64XX_GPP_BASE + 0x10)
-
-#define S3C64XX_GPP_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPP_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPP_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPP0_MEM0_ADDRV (0x02 << 0)
-#define S3C64XX_GPP0_EINT_G8_0 (0x03 << 0)
-
-#define S3C64XX_GPP1_MEM0_SMCLK (0x02 << 2)
-#define S3C64XX_GPP1_EINT_G8_1 (0x03 << 2)
-
-#define S3C64XX_GPP2_MEM0_nWAIT (0x02 << 4)
-#define S3C64XX_GPP2_EINT_G8_2 (0x03 << 4)
-
-#define S3C64XX_GPP3_MEM0_RDY0_ALE (0x02 << 6)
-#define S3C64XX_GPP3_EINT_G8_3 (0x03 << 6)
-
-#define S3C64XX_GPP4_MEM0_RDY1_CLE (0x02 << 8)
-#define S3C64XX_GPP4_EINT_G8_4 (0x03 << 8)
-
-#define S3C64XX_GPP5_MEM0_INTsm0_FWE (0x02 << 10)
-#define S3C64XX_GPP5_EINT_G8_5 (0x03 << 10)
-
-#define S3C64XX_GPP6_MEM0_(null) (0x02 << 12)
-#define S3C64XX_GPP6_EINT_G8_6 (0x03 << 12)
-
-#define S3C64XX_GPP7_MEM0_INTsm1_FRE (0x02 << 14)
-#define S3C64XX_GPP7_EINT_G8_7 (0x03 << 14)
-
-#define S3C64XX_GPP8_MEM0_RPn_RnB (0x02 << 16)
-#define S3C64XX_GPP8_EINT_G8_8 (0x03 << 16)
-
-#define S3C64XX_GPP9_MEM0_ATA_RESET (0x02 << 18)
-#define S3C64XX_GPP9_EINT_G8_9 (0x03 << 18)
-
-#define S3C64XX_GPP10_MEM0_ATA_INPACK (0x02 << 20)
-#define S3C64XX_GPP10_EINT_G8_10 (0x03 << 20)
-
-#define S3C64XX_GPP11_MEM0_ATA_REG (0x02 << 22)
-#define S3C64XX_GPP11_EINT_G8_11 (0x03 << 22)
-
-#define S3C64XX_GPP12_MEM0_ATA_WE (0x02 << 24)
-#define S3C64XX_GPP12_EINT_G8_12 (0x03 << 24)
-
-#define S3C64XX_GPP13_MEM0_ATA_OE (0x02 << 26)
-#define S3C64XX_GPP13_EINT_G8_13 (0x03 << 26)
-
-#define S3C64XX_GPP14_MEM0_ATA_CD (0x02 << 28)
-#define S3C64XX_GPP14_EINT_G8_14 (0x03 << 28)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
deleted file mode 100644
index 1712223487b0..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank Q register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPQCON (S3C64XX_GPQ_BASE + 0x00)
-#define S3C64XX_GPQDAT (S3C64XX_GPQ_BASE + 0x04)
-#define S3C64XX_GPQPUD (S3C64XX_GPQ_BASE + 0x08)
-#define S3C64XX_GPQCONSLP (S3C64XX_GPQ_BASE + 0x0c)
-#define S3C64XX_GPQPUDSLP (S3C64XX_GPQ_BASE + 0x10)
-
-#define S3C64XX_GPQ_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPQ_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPQ_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPQ0_MEM0_ADDR18_RAS (0x02 << 0)
-#define S3C64XX_GPQ0_EINT_G9_0 (0x03 << 0)
-
-#define S3C64XX_GPQ1_MEM0_ADDR19_CAS (0x02 << 2)
-#define S3C64XX_GPQ1_EINT_G9_1 (0x03 << 2)
-
-#define S3C64XX_GPQ2_EINT_G9_2 (0x03 << 4)
-
-#define S3C64XX_GPQ3_EINT_G9_3 (0x03 << 6)
-
-#define S3C64XX_GPQ4_EINT_G9_4 (0x03 << 8)
-
-#define S3C64XX_GPQ5_EINT_G9_5 (0x03 << 10)
-
-#define S3C64XX_GPQ6_EINT_G9_6 (0x03 << 12)
-
-#define S3C64XX_GPQ7_MEM0_ADDR17_WENDMC (0x02 << 14)
-#define S3C64XX_GPQ7_EINT_G9_7 (0x03 << 14)
-
-#define S3C64XX_GPQ8_MEM0_ADDR16_APDMC (0x02 << 16)
-#define S3C64XX_GPQ8_EINT_G9_8 (0x03 << 16)
-
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 686a4f270b12..2c0353a80906 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -50,7 +50,6 @@
#include <mach/hardware.h>
#include <mach/regs-fb.h>
#include <mach/map.h>
-#include <mach/gpio-bank-f.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 79412f735a8d..bc1c470b7de6 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -30,26 +30,18 @@
#include <mach/regs-gpio-memport.h>
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
-#include <mach/gpio-bank-n.h>
-
void s3c_pm_debug_smdkled(u32 set, u32 clear)
{
unsigned long flags;
- u32 reg;
+ int i;
local_irq_save(flags);
- reg = __raw_readl(S3C64XX_GPNCON);
- reg &= ~(S3C64XX_GPN_CONMASK(12) | S3C64XX_GPN_CONMASK(13) |
- S3C64XX_GPN_CONMASK(14) | S3C64XX_GPN_CONMASK(15));
- reg |= S3C64XX_GPN_OUTPUT(12) | S3C64XX_GPN_OUTPUT(13) |
- S3C64XX_GPN_OUTPUT(14) | S3C64XX_GPN_OUTPUT(15);
- __raw_writel(reg, S3C64XX_GPNCON);
-
- reg = __raw_readl(S3C64XX_GPNDAT);
- reg &= ~(clear << 12);
- reg |= set << 12;
- __raw_writel(reg, S3C64XX_GPNDAT);
-
+ for (i = 0; i < 4; i++) {
+ if (clear & (1 << i))
+ gpio_set_value(S3C64XX_GPN(12 + i), 0);
+ if (set & (1 << i))
+ gpio_set_value(S3C64XX_GPN(12 + i), 1);
+ }
local_irq_restore(flags);
}
#endif
@@ -187,6 +179,18 @@ static int s3c64xx_pm_init(void)
pm_cpu_prep = s3c64xx_pm_prepare;
pm_cpu_sleep = s3c64xx_cpu_suspend;
pm_uart_udivslot = 1;
+
+#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
+ gpio_request(S3C64XX_GPN(12), "DEBUG_LED0");
+ gpio_request(S3C64XX_GPN(13), "DEBUG_LED1");
+ gpio_request(S3C64XX_GPN(14), "DEBUG_LED2");
+ gpio_request(S3C64XX_GPN(15), "DEBUG_LED3");
+ gpio_direction_output(S3C64XX_GPN(12), 0);
+ gpio_direction_output(S3C64XX_GPN(13), 0);
+ gpio_direction_output(S3C64XX_GPN(14), 0);
+ gpio_direction_output(S3C64XX_GPN(15), 0);
+#endif
+
return 0;
}
diff --git a/arch/arm/mach-s3c64xx/setup-i2c0.c b/arch/arm/mach-s3c64xx/setup-i2c0.c
index 406192a43c6e..241af94a9e70 100644
--- a/arch/arm/mach-s3c64xx/setup-i2c0.c
+++ b/arch/arm/mach-s3c64xx/setup-i2c0.c
@@ -18,14 +18,11 @@
struct platform_device; /* don't need the contents */
-#include <mach/gpio-bank-b.h>
#include <plat/iic.h>
#include <plat/gpio-cfg.h>

void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S3C64XX_GPB(5), S3C64XX_GPB5_I2C_SCL0);
- s3c_gpio_cfgpin(S3C64XX_GPB(6), S3C64XX_GPB6_I2C_SDA0);
- s3c_gpio_setpull(S3C64XX_GPB(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPB(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S3C64XX_GPB(5), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s3c64xx/setup-i2c1.c b/arch/arm/mach-s3c64xx/setup-i2c1.c
index 1ee62c97cd7f..3d13a961986d 100644
--- a/arch/arm/mach-s3c64xx/setup-i2c1.c
+++ b/arch/arm/mach-s3c64xx/setup-i2c1.c
@@ -18,14 +18,11 @@
struct platform_device; /* don't need the contents */

-#include <mach/gpio-bank-b.h>
#include <plat/iic.h>
#include <plat/gpio-cfg.h>

void s3c_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S3C64XX_GPB(2), S3C64XX_GPB2_I2C_SCL1);
- s3c_gpio_cfgpin(S3C64XX_GPB(3), S3C64XX_GPB3_I2C_SDA1);
- s3c_gpio_setpull(S3C64XX_GPB(2), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPB(3), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S3C64XX_GPB(2), 2,
+ S3C_GPIO_SFN(6), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S
index afe5a762f46e..1f87732b2320 100644
--- a/arch/arm/mach-s3c64xx/sleep.S
+++ b/arch/arm/mach-s3c64xx/sleep.S
@@ -20,7 +20,6 @@
#define S3C64XX_VA_GPIO (0x0)
#include <mach/regs-gpio.h>
-#include <mach/gpio-bank-n.h>
#define LL_UART (S3C_PA_UART + (0x400 * CONFIG_S3C_LOWLEVEL_UART_PORT))
@@ -68,6 +67,13 @@ ENTRY(s3c_cpu_resume)
ldr r2, =LL_UART /* for debug */
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
+
+#define S3C64XX_GPNCON (S3C64XX_GPN_BASE + 0x00)
+#define S3C64XX_GPNDAT (S3C64XX_GPN_BASE + 0x04)
+
+#define S3C64XX_GPN_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
+#define S3C64XX_GPN_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
+
/* Initialise the GPIO state if we are debugging via the SMDK LEDs,
* as the uboot version supplied resets these to inputs during the
* resume checks.
diff --git a/arch/arm/mach-s5p6442/Kconfig b/arch/arm/mach-s5p6442/Kconfig
deleted file mode 100644
index 33569e4007c4..000000000000
--- a/arch/arm/mach-s5p6442/Kconfig
+++ /dev/null
@@ -1,25 +0,0 @@
-# arch/arm/mach-s5p6442/Kconfig
-#
-# Copyright (c) 2010 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-# Configuration options for the S5P6442
-
-if ARCH_S5P6442
-
-config CPU_S5P6442
- bool
- select S3C_PL330_DMA
- help
- Enable S5P6442 CPU support
-
-config MACH_SMDK6442
- bool "SMDK6442"
- select CPU_S5P6442
- select S3C_DEV_WDT
- help
- Machine support for Samsung SMDK6442
-
-endif
diff --git a/arch/arm/mach-s5p6442/Makefile b/arch/arm/mach-s5p6442/Makefile
deleted file mode 100644
index 90a3d8373416..000000000000
--- a/arch/arm/mach-s5p6442/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# arch/arm/mach-s5p6442/Makefile
-#
-# Copyright (c) 2010 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-obj-y :=
-obj-m :=
-obj-n :=
-obj- :=
-
-# Core support for S5P6442 system
-
-obj-$(CONFIG_CPU_S5P6442) += cpu.o init.o clock.o dma.o
-obj-$(CONFIG_CPU_S5P6442) += setup-i2c0.o
-
-# machine support
-
-obj-$(CONFIG_MACH_SMDK6442) += mach-smdk6442.o
-
-# device support
-obj-y += dev-audio.o
-obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
diff --git a/arch/arm/mach-s5p6442/Makefile.boot b/arch/arm/mach-s5p6442/Makefile.boot
deleted file mode 100644
index ff90aa13bd67..000000000000
--- a/arch/arm/mach-s5p6442/Makefile.boot
+++ /dev/null
@@ -1,2 +0,0 @@
- zreladdr-y := 0x20008000
-params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5p6442/clock.c b/arch/arm/mach-s5p6442/clock.c
deleted file mode 100644
index fbbc7bede685..000000000000
--- a/arch/arm/mach-s5p6442/clock.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/clock.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Clock support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/map.h>
-
-#include <plat/cpu-freq.h>
-#include <mach/regs-clock.h>
-#include <plat/clock.h>
-#include <plat/cpu.h>
-#include <plat/pll.h>
-#include <plat/s5p-clock.h>
-#include <plat/clock-clksrc.h>
-#include <plat/s5p6442.h>
-
-static struct clksrc_clk clk_mout_apll = {
- .clk = {
- .name = "mout_apll",
- .id = -1,
- },
- .sources = &clk_src_apll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
-};
-
-static struct clksrc_clk clk_mout_mpll = {
- .clk = {
- .name = "mout_mpll",
- .id = -1,
- },
- .sources = &clk_src_mpll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
-};
-
-static struct clksrc_clk clk_mout_epll = {
- .clk = {
- .name = "mout_epll",
- .id = -1,
- },
- .sources = &clk_src_epll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
-};
-
-/* Possible clock sources for ARM Mux */
-static struct clk *clk_src_arm_list[] = {
- [1] = &clk_mout_apll.clk,
- [2] = &clk_mout_mpll.clk,
-};
-
-static struct clksrc_sources clk_src_arm = {
- .sources = clk_src_arm_list,
- .nr_sources = ARRAY_SIZE(clk_src_arm_list),
-};
-
-static struct clksrc_clk clk_mout_arm = {
- .clk = {
- .name = "mout_arm",
- .id = -1,
- },
- .sources = &clk_src_arm,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 16, .size = 3 },
-};
-
-static struct clk clk_dout_a2m = {
- .name = "dout_a2m",
- .id = -1,
- .parent = &clk_mout_apll.clk,
-};
-
-/* Possible clock sources for D0 Mux */
-static struct clk *clk_src_d0_list[] = {
- [1] = &clk_mout_mpll.clk,
- [2] = &clk_dout_a2m,
-};
-
-static struct clksrc_sources clk_src_d0 = {
- .sources = clk_src_d0_list,
- .nr_sources = ARRAY_SIZE(clk_src_d0_list),
-};
-
-static struct clksrc_clk clk_mout_d0 = {
- .clk = {
- .name = "mout_d0",
- .id = -1,
- },
- .sources = &clk_src_d0,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 20, .size = 3 },
-};
-
-static struct clk clk_dout_apll = {
- .name = "dout_apll",
- .id = -1,
- .parent = &clk_mout_arm.clk,
-};
-
-/* Possible clock sources for D0SYNC Mux */
-static struct clk *clk_src_d0sync_list[] = {
- [1] = &clk_mout_d0.clk,
- [2] = &clk_dout_apll,
-};
-
-static struct clksrc_sources clk_src_d0sync = {
- .sources = clk_src_d0sync_list,
- .nr_sources = ARRAY_SIZE(clk_src_d0sync_list),
-};
-
-static struct clksrc_clk clk_mout_d0sync = {
- .clk = {
- .name = "mout_d0sync",
- .id = -1,
- },
- .sources = &clk_src_d0sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
-};
-
-/* Possible clock sources for D1 Mux */
-static struct clk *clk_src_d1_list[] = {
- [1] = &clk_mout_mpll.clk,
- [2] = &clk_dout_a2m,
-};
-
-static struct clksrc_sources clk_src_d1 = {
- .sources = clk_src_d1_list,
- .nr_sources = ARRAY_SIZE(clk_src_d1_list),
-};
-
-static struct clksrc_clk clk_mout_d1 = {
- .clk = {
- .name = "mout_d1",
- .id = -1,
- },
- .sources = &clk_src_d1,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 24, .size = 3 },
-};
-
-/* Possible clock sources for D1SYNC Mux */
-static struct clk *clk_src_d1sync_list[] = {
- [1] = &clk_mout_d1.clk,
- [2] = &clk_dout_apll,
-};
-
-static struct clksrc_sources clk_src_d1sync = {
- .sources = clk_src_d1sync_list,
- .nr_sources = ARRAY_SIZE(clk_src_d1sync_list),
-};
-
-static struct clksrc_clk clk_mout_d1sync = {
- .clk = {
- .name = "mout_d1sync",
- .id = -1,
- },
- .sources = &clk_src_d1sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
-};
-
-static struct clk clk_hclkd0 = {
- .name = "hclkd0",
- .id = -1,
- .parent = &clk_mout_d0sync.clk,
-};
-
-static struct clk clk_hclkd1 = {
- .name = "hclkd1",
- .id = -1,
- .parent = &clk_mout_d1sync.clk,
-};
-
-static struct clk clk_pclkd0 = {
- .name = "pclkd0",
- .id = -1,
- .parent = &clk_hclkd0,
-};
-
-static struct clk clk_pclkd1 = {
- .name = "pclkd1",
- .id = -1,
- .parent = &clk_hclkd1,
-};
-
-int s5p6442_clk_ip0_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable);
-}
-
-int s5p6442_clk_ip3_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
-}
-
-static struct clksrc_clk clksrcs[] = {
- {
- .clk = {
- .name = "dout_a2m",
- .id = -1,
- .parent = &clk_mout_apll.clk,
- },
- .sources = &clk_src_apll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
- }, {
- .clk = {
- .name = "dout_apll",
- .id = -1,
- .parent = &clk_mout_arm.clk,
- },
- .sources = &clk_src_arm,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 16, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 3 },
- }, {
- .clk = {
- .name = "hclkd1",
- .id = -1,
- .parent = &clk_mout_d1sync.clk,
- },
- .sources = &clk_src_d1sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 24, .size = 4 },
- }, {
- .clk = {
- .name = "hclkd0",
- .id = -1,
- .parent = &clk_mout_d0sync.clk,
- },
- .sources = &clk_src_d0sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 4 },
- }, {
- .clk = {
- .name = "pclkd0",
- .id = -1,
- .parent = &clk_hclkd0,
- },
- .sources = &clk_src_d0sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 },
- }, {
- .clk = {
- .name = "pclkd1",
- .id = -1,
- .parent = &clk_hclkd1,
- },
- .sources = &clk_src_d1sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 },
- }
-};
-
-/* Clock initialisation code */
-static struct clksrc_clk *init_parents[] = {
- &clk_mout_apll,
- &clk_mout_mpll,
- &clk_mout_epll,
- &clk_mout_arm,
- &clk_mout_d0,
- &clk_mout_d0sync,
- &clk_mout_d1,
- &clk_mout_d1sync,
-};
-
-void __init_or_cpufreq s5p6442_setup_clocks(void)
-{
- struct clk *pclkd0_clk;
- struct clk *pclkd1_clk;
-
- unsigned long xtal;
- unsigned long arm;
- unsigned long hclkd0 = 0;
- unsigned long hclkd1 = 0;
- unsigned long pclkd0 = 0;
- unsigned long pclkd1 = 0;
-
- unsigned long apll;
- unsigned long mpll;
- unsigned long epll;
- unsigned int ptr;
-
- printk(KERN_DEBUG "%s: registering clocks\n", __func__);
-
- xtal = clk_get_rate(&clk_xtal);
-
- printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
-
- apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
- mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
- epll = s5p_get_pll45xx(xtal, __raw_readl(S5P_EPLL_CON), pll_4500);
-
- printk(KERN_INFO "S5P6442: PLL settings, A=%ld, M=%ld, E=%ld",
- apll, mpll, epll);
-
- clk_fout_apll.rate = apll;
- clk_fout_mpll.rate = mpll;
- clk_fout_epll.rate = epll;
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
- s3c_set_clksrc(init_parents[ptr], true);
-
- for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
- s3c_set_clksrc(&clksrcs[ptr], true);
-
- arm = clk_get_rate(&clk_dout_apll);
- hclkd0 = clk_get_rate(&clk_hclkd0);
- hclkd1 = clk_get_rate(&clk_hclkd1);
-
- pclkd0_clk = clk_get(NULL, "pclkd0");
- BUG_ON(IS_ERR(pclkd0_clk));
-
- pclkd0 = clk_get_rate(pclkd0_clk);
- clk_put(pclkd0_clk);
-
- pclkd1_clk = clk_get(NULL, "pclkd1");
- BUG_ON(IS_ERR(pclkd1_clk));
-
- pclkd1 = clk_get_rate(pclkd1_clk);
- clk_put(pclkd1_clk);
-
- printk(KERN_INFO "S5P6442: HCLKD0=%ld, HCLKD1=%ld, PCLKD0=%ld, PCLKD1=%ld\n",
- hclkd0, hclkd1, pclkd0, pclkd1);
-
- /* For backward compatibility */
- clk_f.rate = arm;
- clk_h.rate = hclkd1;
- clk_p.rate = pclkd1;
-
- clk_pclkd0.rate = pclkd0;
- clk_pclkd1.rate = pclkd1;
-}
-
-static struct clk init_clocks_off[] = {
- {
- .name = "pdma",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip0_ctrl,
- .ctrlbit = (1 << 3),
- },
-};
-
-static struct clk init_clocks[] = {
- {
- .name = "systimer",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<16),
- }, {
- .name = "uart",
- .id = 0,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<17),
- }, {
- .name = "uart",
- .id = 1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<18),
- }, {
- .name = "uart",
- .id = 2,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<19),
- }, {
- .name = "watchdog",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1 << 22),
- }, {
- .name = "timers",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<23),
- },
-};
-
-static struct clk *clks[] __initdata = {
- &clk_ext,
- &clk_epll,
- &clk_mout_apll.clk,
- &clk_mout_mpll.clk,
- &clk_mout_epll.clk,
- &clk_mout_d0.clk,
- &clk_mout_d0sync.clk,
- &clk_mout_d1.clk,
- &clk_mout_d1sync.clk,
- &clk_hclkd0,
- &clk_pclkd0,
- &clk_hclkd1,
- &clk_pclkd1,
-};
-
-void __init s5p6442_register_clocks(void)
-{
- s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-
- s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
- s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
-
- s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
- s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
-
- s3c_pwmclk_init();
-}
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c
deleted file mode 100644
index 842af86bda6d..000000000000
--- a/arch/arm/mach-s5p6442/cpu.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/cpu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/sysdev.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-
-#include <asm/proc-fns.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-#include <asm/irq.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-clock.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-#include <plat/s5p6442.h>
-
-/* Initial IO mappings */
-
-static struct map_desc s5p6442_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_SYSTIMER,
- .pfn = __phys_to_pfn(S5P6442_PA_SYSTIMER),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_GPIO,
- .pfn = __phys_to_pfn(S5P6442_PA_GPIO),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC0,
- .pfn = __phys_to_pfn(S5P6442_PA_VIC0),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC1,
- .pfn = __phys_to_pfn(S5P6442_PA_VIC1),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC2,
- .pfn = __phys_to_pfn(S5P6442_PA_VIC2),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(S3C_PA_UART),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }
-};
-
-static void s5p6442_idle(void)
-{
- if (!need_resched())
- cpu_do_idle();
-
- local_irq_enable();
-}
-
-/*
- * s5p6442_map_io
- *
- * register the standard cpu IO areas
- */
-
-void __init s5p6442_map_io(void)
-{
- iotable_init(s5p6442_iodesc, ARRAY_SIZE(s5p6442_iodesc));
-}
-
-void __init s5p6442_init_clocks(int xtal)
-{
- printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
- s3c24xx_register_baseclocks(xtal);
- s5p_register_clocks(xtal);
- s5p6442_register_clocks();
- s5p6442_setup_clocks();
-}
-
-void __init s5p6442_init_irq(void)
-{
- /* S5P6442 supports 3 VIC */
- u32 vic[3];
-
- /* VIC0, VIC1, and VIC2: some interrupt reserved */
- vic[0] = 0x7fefffff;
- vic[1] = 0X7f389c81;
- vic[2] = 0X1bbbcfff;
-
- s5p_init_irq(vic, ARRAY_SIZE(vic));
-}
-
-struct sysdev_class s5p6442_sysclass = {
- .name = "s5p6442-core",
-};
-
-static struct sys_device s5p6442_sysdev = {
- .cls = &s5p6442_sysclass,
-};
-
-static int __init s5p6442_core_init(void)
-{
- return sysdev_class_register(&s5p6442_sysclass);
-}
-
-core_initcall(s5p6442_core_init);
-
-int __init s5p6442_init(void)
-{
- printk(KERN_INFO "S5P6442: Initializing architecture\n");
-
- /* set idle function */
- pm_idle = s5p6442_idle;
-
- return sysdev_register(&s5p6442_sysdev);
-}
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
deleted file mode 100644
index 8719dc41fe32..000000000000
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/dev-audio.c
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <plat/gpio-cfg.h>
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-#include <mach/irqs.h>
-
-static int s5p6442_cfg_i2s(struct platform_device *pdev)
-{
- unsigned int base;
-
- /* configure GPIO for i2s port */
- switch (pdev->id) {
- case 1:
- base = S5P6442_GPC1(0);
- break;
-
- case 0:
- base = S5P6442_GPC0(0);
- break;
-
- default:
- printk(KERN_ERR "Invalid Device %d\n", pdev->id);
- return -EINVAL;
- }
-
- s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(2));
- return 0;
-}
-
-static const char *rclksrc_v35[] = {
- [0] = "busclk",
- [1] = "i2sclk",
-};
-
-static struct s3c_audio_pdata i2sv35_pdata = {
- .cfg_gpio = s5p6442_cfg_i2s,
- .type = {
- .i2s = {
- .quirks = QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
- .src_clk = rclksrc_v35,
- },
- },
-};
-
-static struct resource s5p6442_iis0_resource[] = {
- [0] = {
- .start = S5P6442_PA_I2S0,
- .end = S5P6442_PA_I2S0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_I2S0_TX,
- .end = DMACH_I2S0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_I2S0_RX,
- .end = DMACH_I2S0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = DMACH_I2S0S_TX,
- .end = DMACH_I2S0S_TX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_iis0 = {
- .name = "samsung-i2s",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5p6442_iis0_resource),
- .resource = s5p6442_iis0_resource,
- .dev = {
- .platform_data = &i2sv35_pdata,
- },
-};
-
-static const char *rclksrc_v3[] = {
- [0] = "iis",
- [1] = "sclk_audio",
-};
-
-static struct s3c_audio_pdata i2sv3_pdata = {
- .cfg_gpio = s5p6442_cfg_i2s,
- .type = {
- .i2s = {
- .src_clk = rclksrc_v3,
- },
- },
-};
-
-static struct resource s5p6442_iis1_resource[] = {
- [0] = {
- .start = S5P6442_PA_I2S1,
- .end = S5P6442_PA_I2S1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_I2S1_TX,
- .end = DMACH_I2S1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_I2S1_RX,
- .end = DMACH_I2S1_RX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_iis1 = {
- .name = "samsung-i2s",
- .id = 1,
- .num_resources = ARRAY_SIZE(s5p6442_iis1_resource),
- .resource = s5p6442_iis1_resource,
- .dev = {
- .platform_data = &i2sv3_pdata,
- },
-};
-
-/* PCM Controller platform_devices */
-
-static int s5p6442_pcm_cfg_gpio(struct platform_device *pdev)
-{
- unsigned int base;
-
- switch (pdev->id) {
- case 0:
- base = S5P6442_GPC0(0);
- break;
-
- case 1:
- base = S5P6442_GPC1(0);
- break;
-
- default:
- printk(KERN_DEBUG "Invalid PCM Controller number!");
- return -EINVAL;
- }
-
- s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(3));
- return 0;
-}
-
-static struct s3c_audio_pdata s3c_pcm_pdata = {
- .cfg_gpio = s5p6442_pcm_cfg_gpio,
-};
-
-static struct resource s5p6442_pcm0_resource[] = {
- [0] = {
- .start = S5P6442_PA_PCM0,
- .end = S5P6442_PA_PCM0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_PCM0_TX,
- .end = DMACH_PCM0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_PCM0_RX,
- .end = DMACH_PCM0_RX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_pcm0 = {
- .name = "samsung-pcm",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5p6442_pcm0_resource),
- .resource = s5p6442_pcm0_resource,
- .dev = {
- .platform_data = &s3c_pcm_pdata,
- },
-};
-
-static struct resource s5p6442_pcm1_resource[] = {
- [0] = {
- .start = S5P6442_PA_PCM1,
- .end = S5P6442_PA_PCM1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_PCM1_TX,
- .end = DMACH_PCM1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_PCM1_RX,
- .end = DMACH_PCM1_RX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_pcm1 = {
- .name = "samsung-pcm",
- .id = 1,
- .num_resources = ARRAY_SIZE(s5p6442_pcm1_resource),
- .resource = s5p6442_pcm1_resource,
- .dev = {
- .platform_data = &s3c_pcm_pdata,
- },
-};
diff --git a/arch/arm/mach-s5p6442/dev-spi.c b/arch/arm/mach-s5p6442/dev-spi.c
deleted file mode 100644
index cce8c2470709..000000000000
--- a/arch/arm/mach-s5p6442/dev-spi.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/dev-spi.c
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <mach/dma.h>
-#include <mach/map.h>
-#include <mach/irqs.h>
-#include <mach/spi-clocks.h>
-
-#include <plat/s3c64xx-spi.h>
-#include <plat/gpio-cfg.h>
-
-static char *spi_src_clks[] = {
- [S5P6442_SPI_SRCCLK_PCLK] = "pclk",
- [S5P6442_SPI_SRCCLK_SCLK] = "spi_epll",
-};
-
-/* SPI Controller platform_devices */
-
-/* Since we emulate multi-cs capability, we do not touch the CS.
- * The emulated CS is toggled by board specific mechanism, as it can
- * be either some immediate GPIO or some signal out of some other
- * chip in between ... or some yet another way.
- * We simply do not assume anything about CS.
- */
-static int s5p6442_spi_cfg_gpio(struct platform_device *pdev)
-{
- switch (pdev->id) {
- case 0:
- s3c_gpio_cfgpin(S5P6442_GPB(0), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6442_GPB(0), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgall_range(S5P6442_GPB(2), 2,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
- break;
-
- default:
- dev_err(&pdev->dev, "Invalid SPI Controller number!");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct resource s5p6442_spi0_resource[] = {
- [0] = {
- .start = S5P6442_PA_SPI,
- .end = S5P6442_PA_SPI + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI0_TX,
- .end = DMACH_SPI0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI0_RX,
- .end = DMACH_SPI0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI0,
- .end = IRQ_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5p6442_spi0_pdata = {
- .cfg_gpio = s5p6442_spi_cfg_gpio,
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
-};
-
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-struct platform_device s5p6442_device_spi = {
- .name = "s3c64xx-spi",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5p6442_spi0_resource),
- .resource = s5p6442_spi0_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5p6442_spi0_pdata,
- },
-};
-
-void __init s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
-{
- struct s3c64xx_spi_info *pd;
-
- /* Reject invalid configuration */
- if (!num_cs || src_clk_nr < 0
- || src_clk_nr > S5P6442_SPI_SRCCLK_SCLK) {
- printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
- return;
- }
-
- switch (cntrlr) {
- case 0:
- pd = &s5p6442_spi0_pdata;
- break;
- default:
- printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
- __func__, cntrlr);
- return;
- }
-
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- pd->src_clk_name = spi_src_clks[src_clk_nr];
-}
diff --git a/arch/arm/mach-s5p6442/dma.c b/arch/arm/mach-s5p6442/dma.c
deleted file mode 100644
index 7dfb13654f8a..000000000000
--- a/arch/arm/mach-s5p6442/dma.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-
-#include <plat/devs.h>
-#include <plat/irqs.h>
-
-#include <mach/map.h>
-#include <mach/irqs.h>
-
-#include <plat/s3c-pl330-pdata.h>
-
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
-static struct resource s5p6442_pdma_resource[] = {
- [0] = {
- .start = S5P6442_PA_PDMA,
- .end = S5P6442_PA_PDMA + SZ_4K,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PDMA,
- .end = IRQ_PDMA,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c_pl330_platdata s5p6442_pdma_pdata = {
- .peri = {
- [0] = DMACH_UART0_RX,
- [1] = DMACH_UART0_TX,
- [2] = DMACH_UART1_RX,
- [3] = DMACH_UART1_TX,
- [4] = DMACH_UART2_RX,
- [5] = DMACH_UART2_TX,
- [6] = DMACH_MAX,
- [7] = DMACH_MAX,
- [8] = DMACH_MAX,
- [9] = DMACH_I2S0_RX,
- [10] = DMACH_I2S0_TX,
- [11] = DMACH_I2S0S_TX,
- [12] = DMACH_I2S1_RX,
- [13] = DMACH_I2S1_TX,
- [14] = DMACH_MAX,
- [15] = DMACH_MAX,
- [16] = DMACH_SPI0_RX,
- [17] = DMACH_SPI0_TX,
- [18] = DMACH_MAX,
- [19] = DMACH_MAX,
- [20] = DMACH_PCM0_RX,
- [21] = DMACH_PCM0_TX,
- [22] = DMACH_PCM1_RX,
- [23] = DMACH_PCM1_TX,
- [24] = DMACH_MAX,
- [25] = DMACH_MAX,
- [26] = DMACH_MAX,
- [27] = DMACH_MSM_REQ0,
- [28] = DMACH_MSM_REQ1,
- [29] = DMACH_MSM_REQ2,
- [30] = DMACH_MSM_REQ3,
- [31] = DMACH_MAX,
- },
-};
-
-static struct platform_device s5p6442_device_pdma = {
- .name = "s3c-pl330",
- .id = -1,
- .num_resources = ARRAY_SIZE(s5p6442_pdma_resource),
- .resource = s5p6442_pdma_resource,
- .dev = {
- .dma_mask = &dma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5p6442_pdma_pdata,
- },
-};
-
-static struct platform_device *s5p6442_dmacs[] __initdata = {
- &s5p6442_device_pdma,
-};
-
-static int __init s5p6442_dma_init(void)
-{
- platform_add_devices(s5p6442_dmacs, ARRAY_SIZE(s5p6442_dmacs));
-
- return 0;
-}
-arch_initcall(s5p6442_dma_init);
diff --git a/arch/arm/mach-s5p6442/include/mach/debug-macro.S b/arch/arm/mach-s5p6442/include/mach/debug-macro.S
deleted file mode 100644
index e2213205d780..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/debug-macro.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/debug-macro.S
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/mach-s3c6400/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* pull in the relevant register and map files. */
-
-#include <mach/map.h>
-#include <plat/regs-serial.h>
-
- .macro addruart, rp, rv
- ldr \rp, = S3C_PA_UART
- ldr \rv, = S3C_VA_UART
-#if CONFIG_DEBUG_S3C_UART != 0
- add \rp, \rp, #(0x400 * CONFIG_DEBUG_S3C_UART)
- add \rv, \rv, #(0x400 * CONFIG_DEBUG_S3C_UART)
-#endif
- .endm
-
-#define fifo_full fifo_full_s5pv210
-#define fifo_level fifo_level_s5pv210
-
-/* include the reset of the code which will do the work, we're only
- * compiling for a single cpu processor type so the default of s3c2440
- * will be fine with us.
- */
-
-#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-s5p6442/include/mach/entry-macro.S b/arch/arm/mach-s5p6442/include/mach/entry-macro.S
deleted file mode 100644
index 6d574edbf1ae..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/entry-macro.S
+++ /dev/null
@@ -1,48 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/entry-macro.S
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Low-level IRQ helper macros for the Samsung S5P6442
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-#include <plat/irqs.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =VA_VIC0
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- @ check the vic0
- mov \irqnr, # S5P_IRQ_OFFSET + 31
- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
- teq \irqstat, #0
-
- @ otherwise try vic1
- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- @ otherwise try vic2
- addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- clzne \irqstat, \irqstat
- subne \irqnr, \irqnr, \irqstat
- .endm
diff --git a/arch/arm/mach-s5p6442/include/mach/gpio.h b/arch/arm/mach-s5p6442/include/mach/gpio.h
deleted file mode 100644
index b8715df2fdab..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/gpio.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/gpio.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - GPIO lib support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_GPIO_H
-#define __ASM_ARCH_GPIO_H __FILE__
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-/* GPIO bank sizes */
-#define S5P6442_GPIO_A0_NR (8)
-#define S5P6442_GPIO_A1_NR (2)
-#define S5P6442_GPIO_B_NR (4)
-#define S5P6442_GPIO_C0_NR (5)
-#define S5P6442_GPIO_C1_NR (5)
-#define S5P6442_GPIO_D0_NR (2)
-#define S5P6442_GPIO_D1_NR (6)
-#define S5P6442_GPIO_E0_NR (8)
-#define S5P6442_GPIO_E1_NR (5)
-#define S5P6442_GPIO_F0_NR (8)
-#define S5P6442_GPIO_F1_NR (8)
-#define S5P6442_GPIO_F2_NR (8)
-#define S5P6442_GPIO_F3_NR (6)
-#define S5P6442_GPIO_G0_NR (7)
-#define S5P6442_GPIO_G1_NR (7)
-#define S5P6442_GPIO_G2_NR (7)
-#define S5P6442_GPIO_H0_NR (8)
-#define S5P6442_GPIO_H1_NR (8)
-#define S5P6442_GPIO_H2_NR (8)
-#define S5P6442_GPIO_H3_NR (8)
-#define S5P6442_GPIO_J0_NR (8)
-#define S5P6442_GPIO_J1_NR (6)
-#define S5P6442_GPIO_J2_NR (8)
-#define S5P6442_GPIO_J3_NR (8)
-#define S5P6442_GPIO_J4_NR (5)
-
-/* GPIO bank numbers */
-
-/* CONFIG_S3C_GPIO_SPACE allows the user to select extra
- * space for debugging purposes so that any accidental
- * change from one gpio bank to another can be caught.
-*/
-
-#define S5P6442_GPIO_NEXT(__gpio) \
- ((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1)
-
-enum s5p_gpio_number {
- S5P6442_GPIO_A0_START = 0,
- S5P6442_GPIO_A1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_A0),
- S5P6442_GPIO_B_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_A1),
- S5P6442_GPIO_C0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_B),
- S5P6442_GPIO_C1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_C0),
- S5P6442_GPIO_D0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_C1),
- S5P6442_GPIO_D1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_D0),
- S5P6442_GPIO_E0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_D1),
- S5P6442_GPIO_E1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_E0),
- S5P6442_GPIO_F0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_E1),
- S5P6442_GPIO_F1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F0),
- S5P6442_GPIO_F2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F1),
- S5P6442_GPIO_F3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F2),
- S5P6442_GPIO_G0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F3),
- S5P6442_GPIO_G1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G0),
- S5P6442_GPIO_G2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G1),
- S5P6442_GPIO_H0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G2),
- S5P6442_GPIO_H1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H0),
- S5P6442_GPIO_H2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H1),
- S5P6442_GPIO_H3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H2),
- S5P6442_GPIO_J0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H3),
- S5P6442_GPIO_J1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J0),
- S5P6442_GPIO_J2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J1),
- S5P6442_GPIO_J3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J2),
- S5P6442_GPIO_J4_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J3),
-};
-
-/* S5P6442 GPIO number definitions. */
-#define S5P6442_GPA0(_nr) (S5P6442_GPIO_A0_START + (_nr))
-#define S5P6442_GPA1(_nr) (S5P6442_GPIO_A1_START + (_nr))
-#define S5P6442_GPB(_nr) (S5P6442_GPIO_B_START + (_nr))
-#define S5P6442_GPC0(_nr) (S5P6442_GPIO_C0_START + (_nr))
-#define S5P6442_GPC1(_nr) (S5P6442_GPIO_C1_START + (_nr))
-#define S5P6442_GPD0(_nr) (S5P6442_GPIO_D0_START + (_nr))
-#define S5P6442_GPD1(_nr) (S5P6442_GPIO_D1_START + (_nr))
-#define S5P6442_GPE0(_nr) (S5P6442_GPIO_E0_START + (_nr))
-#define S5P6442_GPE1(_nr) (S5P6442_GPIO_E1_START + (_nr))
-#define S5P6442_GPF0(_nr) (S5P6442_GPIO_F0_START + (_nr))
-#define S5P6442_GPF1(_nr) (S5P6442_GPIO_F1_START + (_nr))
-#define S5P6442_GPF2(_nr) (S5P6442_GPIO_F2_START + (_nr))
-#define S5P6442_GPF3(_nr) (S5P6442_GPIO_F3_START + (_nr))
-#define S5P6442_GPG0(_nr) (S5P6442_GPIO_G0_START + (_nr))
-#define S5P6442_GPG1(_nr) (S5P6442_GPIO_G1_START + (_nr))
-#define S5P6442_GPG2(_nr) (S5P6442_GPIO_G2_START + (_nr))
-#define S5P6442_GPH0(_nr) (S5P6442_GPIO_H0_START + (_nr))
-#define S5P6442_GPH1(_nr) (S5P6442_GPIO_H1_START + (_nr))
-#define S5P6442_GPH2(_nr) (S5P6442_GPIO_H2_START + (_nr))
-#define S5P6442_GPH3(_nr) (S5P6442_GPIO_H3_START + (_nr))
-#define S5P6442_GPJ0(_nr) (S5P6442_GPIO_J0_START + (_nr))
-#define S5P6442_GPJ1(_nr) (S5P6442_GPIO_J1_START + (_nr))
-#define S5P6442_GPJ2(_nr) (S5P6442_GPIO_J2_START + (_nr))
-#define S5P6442_GPJ3(_nr) (S5P6442_GPIO_J3_START + (_nr))
-#define S5P6442_GPJ4(_nr) (S5P6442_GPIO_J4_START + (_nr))
-
-/* the end of the S5P6442 specific gpios */
-#define S5P6442_GPIO_END (S5P6442_GPJ4(S5P6442_GPIO_J4_NR) + 1)
-#define S3C_GPIO_END S5P6442_GPIO_END
-
-/* define the number of gpios we need to the one after the GPJ4() range */
-#define ARCH_NR_GPIOS (S5P6442_GPJ4(S5P6442_GPIO_J4_NR) + \
- CONFIG_SAMSUNG_GPIO_EXTRA + 1)
-
-#include <asm-generic/gpio.h>
-
-#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/hardware.h b/arch/arm/mach-s5p6442/include/mach/hardware.h
deleted file mode 100644
index 8cd7b67b49d4..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/hardware.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/hardware.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Hardware support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H __FILE__
-
-/* currently nothing here, placeholder */
-
-#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/io.h b/arch/arm/mach-s5p6442/include/mach/io.h
deleted file mode 100644
index 5d2195ad0b67..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/io.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-s5p6442/include/mach/io.h
- *
- * Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Default IO routines for S5P6442
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif
diff --git a/arch/arm/mach-s5p6442/include/mach/irqs.h b/arch/arm/mach-s5p6442/include/mach/irqs.h
deleted file mode 100644
index 3fbc6c3ad2da..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/irqs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/irqs.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - IRQ definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H __FILE__
-
-#include <plat/irqs.h>
-
-/* VIC0 */
-#define IRQ_EINT16_31 S5P_IRQ_VIC0(16)
-#define IRQ_BATF S5P_IRQ_VIC0(17)
-#define IRQ_MDMA S5P_IRQ_VIC0(18)
-#define IRQ_PDMA S5P_IRQ_VIC0(19)
-#define IRQ_TIMER0_VIC S5P_IRQ_VIC0(21)
-#define IRQ_TIMER1_VIC S5P_IRQ_VIC0(22)
-#define IRQ_TIMER2_VIC S5P_IRQ_VIC0(23)
-#define IRQ_TIMER3_VIC S5P_IRQ_VIC0(24)
-#define IRQ_TIMER4_VIC S5P_IRQ_VIC0(25)
-#define IRQ_SYSTIMER S5P_IRQ_VIC0(26)
-#define IRQ_WDT S5P_IRQ_VIC0(27)
-#define IRQ_RTC_ALARM S5P_IRQ_VIC0(28)
-#define IRQ_RTC_TIC S5P_IRQ_VIC0(29)
-#define IRQ_GPIOINT S5P_IRQ_VIC0(30)
-
-/* VIC1 */
-#define IRQ_PMU S5P_IRQ_VIC1(0)
-#define IRQ_ONENAND S5P_IRQ_VIC1(7)
-#define IRQ_UART0 S5P_IRQ_VIC1(10)
-#define IRQ_UART1 S5P_IRQ_VIC1(11)
-#define IRQ_UART2 S5P_IRQ_VIC1(12)
-#define IRQ_SPI0 S5P_IRQ_VIC1(15)
-#define IRQ_IIC S5P_IRQ_VIC1(19)
-#define IRQ_IIC1 S5P_IRQ_VIC1(20)
-#define IRQ_IIC2 S5P_IRQ_VIC1(21)
-#define IRQ_OTG S5P_IRQ_VIC1(24)
-#define IRQ_MSM S5P_IRQ_VIC1(25)
-#define IRQ_HSMMC0 S5P_IRQ_VIC1(26)
-#define IRQ_HSMMC1 S5P_IRQ_VIC1(27)
-#define IRQ_HSMMC2 S5P_IRQ_VIC1(28)
-#define IRQ_COMMRX S5P_IRQ_VIC1(29)
-#define IRQ_COMMTX S5P_IRQ_VIC1(30)
-
-/* VIC2 */
-#define IRQ_LCD0 S5P_IRQ_VIC2(0)
-#define IRQ_LCD1 S5P_IRQ_VIC2(1)
-#define IRQ_LCD2 S5P_IRQ_VIC2(2)
-#define IRQ_LCD3 S5P_IRQ_VIC2(3)
-#define IRQ_ROTATOR S5P_IRQ_VIC2(4)
-#define IRQ_FIMC0 S5P_IRQ_VIC2(5)
-#define IRQ_FIMC1 S5P_IRQ_VIC2(6)
-#define IRQ_FIMC2 S5P_IRQ_VIC2(7)
-#define IRQ_JPEG S5P_IRQ_VIC2(8)
-#define IRQ_3D S5P_IRQ_VIC2(10)
-#define IRQ_Mixer S5P_IRQ_VIC2(11)
-#define IRQ_MFC S5P_IRQ_VIC2(14)
-#define IRQ_TVENC S5P_IRQ_VIC2(15)
-#define IRQ_I2S0 S5P_IRQ_VIC2(16)
-#define IRQ_I2S1 S5P_IRQ_VIC2(17)
-#define IRQ_RP S5P_IRQ_VIC2(19)
-#define IRQ_PCM0 S5P_IRQ_VIC2(20)
-#define IRQ_PCM1 S5P_IRQ_VIC2(21)
-#define IRQ_ADC S5P_IRQ_VIC2(23)
-#define IRQ_PENDN S5P_IRQ_VIC2(24)
-#define IRQ_KEYPAD S5P_IRQ_VIC2(25)
-#define IRQ_SSS_INT S5P_IRQ_VIC2(27)
-#define IRQ_SSS_HASH S5P_IRQ_VIC2(28)
-#define IRQ_VIC_END S5P_IRQ_VIC2(31)
-
-#define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1)
-
-#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
-#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE)
-
-/* Set the default NR_IRQS */
-
-#define NR_IRQS (IRQ_EINT(31) + 1)
-
-#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
deleted file mode 100644
index 058dab4482a1..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/map.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Memory map definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_MAP_H
-#define __ASM_ARCH_MAP_H __FILE__
-
-#include <plat/map-base.h>
-#include <plat/map-s5p.h>
-
-#define S5P6442_PA_SDRAM 0x20000000
-
-#define S5P6442_PA_I2S0 0xC0B00000
-#define S5P6442_PA_I2S1 0xF2200000
-
-#define S5P6442_PA_CHIPID 0xE0000000
-
-#define S5P6442_PA_SYSCON 0xE0100000
-
-#define S5P6442_PA_GPIO 0xE0200000
-
-#define S5P6442_PA_VIC0 0xE4000000
-#define S5P6442_PA_VIC1 0xE4100000
-#define S5P6442_PA_VIC2 0xE4200000
-
-#define S5P6442_PA_SROMC 0xE7000000
-
-#define S5P6442_PA_MDMA 0xE8000000
-#define S5P6442_PA_PDMA 0xE9000000
-
-#define S5P6442_PA_TIMER 0xEA000000
-
-#define S5P6442_PA_SYSTIMER 0xEA100000
-
-#define S5P6442_PA_WATCHDOG 0xEA200000
-
-#define S5P6442_PA_UART 0xEC000000
-
-#define S5P6442_PA_IIC0 0xEC100000
-
-#define S5P6442_PA_SPI 0xEC300000
-
-#define S5P6442_PA_PCM0 0xF2400000
-#define S5P6442_PA_PCM1 0xF2500000
-
-/* Compatibiltiy Defines */
-
-#define S3C_PA_IIC S5P6442_PA_IIC0
-#define S3C_PA_WDT S5P6442_PA_WATCHDOG
-
-#define S5P_PA_CHIPID S5P6442_PA_CHIPID
-#define S5P_PA_SDRAM S5P6442_PA_SDRAM
-#define S5P_PA_SROMC S5P6442_PA_SROMC
-#define S5P_PA_SYSCON S5P6442_PA_SYSCON
-#define S5P_PA_TIMER S5P6442_PA_TIMER
-
-/* UART */
-
-#define S3C_PA_UART S5P6442_PA_UART
-
-#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0 S5P_PA_UART(0)
-#define S5P_PA_UART1 S5P_PA_UART(1)
-#define S5P_PA_UART2 S5P_PA_UART(2)
-
-#define S5P_SZ_UART SZ_256
-
-#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/memory.h b/arch/arm/mach-s5p6442/include/mach/memory.h
deleted file mode 100644
index cfe259dded33..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/memory.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/memory.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Memory definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-#define PLAT_PHYS_OFFSET UL(0x20000000)
-#define CONSISTENT_DMA_SIZE SZ_8M
-
-#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h b/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
deleted file mode 100644
index 2724b37def31..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
- *
- * S5P6442 - pwm clock and timer support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_PWMCLK_H
-#define __ASM_ARCH_PWMCLK_H __FILE__
-
-/**
- * pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @tcfg: The timer TCFG1 register bits shifted down to 0.
- *
- * Return true if the given configuration from TCFG1 is a TCLK instead
- * any of the TDIV clocks.
- */
-static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
-{
- return tcfg == S3C64XX_TCFG1_MUX_TCLK;
-}
-
-/**
- * tcfg_to_divisor() - convert tcfg1 setting to a divisor
- * @tcfg1: The tcfg1 setting, shifted down.
- *
- * Get the divisor value for the given tcfg1 setting. We assume the
- * caller has already checked to see if this is not a TCLK source.
- */
-static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
-{
- return 1 << tcfg1;
-}
-
-/**
- * pwm_tdiv_has_div1() - does the tdiv setting have a /1
- *
- * Return true if we have a /1 in the tdiv setting.
- */
-static inline unsigned int pwm_tdiv_has_div1(void)
-{
- return 1;
-}
-
-/**
- * pwm_tdiv_div_bits() - calculate TCFG1 divisor value.
- * @div: The divisor to calculate the bit information for.
- *
- * Turn a divisor into the necessary bit field for TCFG1.
- */
-static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
-{
- return ilog2(div);
-}
-
-#define S3C_TCFG1_MUX_TCLK S3C64XX_TCFG1_MUX_TCLK
-
-#endif /* __ASM_ARCH_PWMCLK_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/regs-clock.h b/arch/arm/mach-s5p6442/include/mach/regs-clock.h
deleted file mode 100644
index 00828a336991..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/regs-clock.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/regs-clock.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Clock register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_CLOCK_H
-#define __ASM_ARCH_REGS_CLOCK_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_CLKREG(x) (S3C_VA_SYS + (x))
-
-#define S5P_APLL_LOCK S5P_CLKREG(0x00)
-#define S5P_MPLL_LOCK S5P_CLKREG(0x08)
-#define S5P_EPLL_LOCK S5P_CLKREG(0x10)
-#define S5P_VPLL_LOCK S5P_CLKREG(0x20)
-
-#define S5P_APLL_CON S5P_CLKREG(0x100)
-#define S5P_MPLL_CON S5P_CLKREG(0x108)
-#define S5P_EPLL_CON S5P_CLKREG(0x110)
-#define S5P_VPLL_CON S5P_CLKREG(0x120)
-
-#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
-#define S5P_CLK_SRC1 S5P_CLKREG(0x204)
-#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
-#define S5P_CLK_SRC3 S5P_CLKREG(0x20C)
-#define S5P_CLK_SRC4 S5P_CLKREG(0x210)
-#define S5P_CLK_SRC5 S5P_CLKREG(0x214)
-#define S5P_CLK_SRC6 S5P_CLKREG(0x218)
-
-#define S5P_CLK_SRC_MASK0 S5P_CLKREG(0x280)
-#define S5P_CLK_SRC_MASK1 S5P_CLKREG(0x284)
-
-#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
-#define S5P_CLK_DIV1 S5P_CLKREG(0x304)
-#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
-#define S5P_CLK_DIV3 S5P_CLKREG(0x30C)
-#define S5P_CLK_DIV4 S5P_CLKREG(0x310)
-#define S5P_CLK_DIV5 S5P_CLKREG(0x314)
-#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
-
-#define S5P_CLKGATE_IP0 S5P_CLKREG(0x460)
-#define S5P_CLKGATE_IP3 S5P_CLKREG(0x46C)
-
-/* CLK_OUT */
-#define S5P_CLK_OUT_SHIFT (12)
-#define S5P_CLK_OUT_MASK (0x1F << S5P_CLK_OUT_SHIFT)
-#define S5P_CLK_OUT S5P_CLKREG(0x500)
-
-#define S5P_CLK_DIV_STAT0 S5P_CLKREG(0x1000)
-#define S5P_CLK_DIV_STAT1 S5P_CLKREG(0x1004)
-
-#define S5P_CLK_MUX_STAT0 S5P_CLKREG(0x1100)
-#define S5P_CLK_MUX_STAT1 S5P_CLKREG(0x1104)
-
-#define S5P_MDNIE_SEL S5P_CLKREG(0x7008)
-
-/* Register Bit definition */
-#define S5P_EPLL_EN (1<<31)
-#define S5P_EPLL_MASK 0xffffffff
-#define S5P_EPLLVAL(_m, _p, _s) ((_m) << 16 | ((_p) << 8) | ((_s)))
-
-/* CLKDIV0 */
-#define S5P_CLKDIV0_APLL_SHIFT (0)
-#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
-#define S5P_CLKDIV0_A2M_SHIFT (4)
-#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT)
-#define S5P_CLKDIV0_D0CLK_SHIFT (16)
-#define S5P_CLKDIV0_D0CLK_MASK (0xF << S5P_CLKDIV0_D0CLK_SHIFT)
-#define S5P_CLKDIV0_P0CLK_SHIFT (20)
-#define S5P_CLKDIV0_P0CLK_MASK (0x7 << S5P_CLKDIV0_P0CLK_SHIFT)
-#define S5P_CLKDIV0_D1CLK_SHIFT (24)
-#define S5P_CLKDIV0_D1CLK_MASK (0xF << S5P_CLKDIV0_D1CLK_SHIFT)
-#define S5P_CLKDIV0_P1CLK_SHIFT (28)
-#define S5P_CLKDIV0_P1CLK_MASK (0x7 << S5P_CLKDIV0_P1CLK_SHIFT)
-
-/* Clock MUX status Registers */
-#define S5P_CLK_MUX_STAT0_APLL_SHIFT (0)
-#define S5P_CLK_MUX_STAT0_APLL_MASK (0x7 << S5P_CLK_MUX_STAT0_APLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_MPLL_SHIFT (4)
-#define S5P_CLK_MUX_STAT0_MPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_MPLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_EPLL_SHIFT (8)
-#define S5P_CLK_MUX_STAT0_EPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_EPLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_VPLL_SHIFT (12)
-#define S5P_CLK_MUX_STAT0_VPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_VPLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_MUXARM_SHIFT (16)
-#define S5P_CLK_MUX_STAT0_MUXARM_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXARM_SHIFT)
-#define S5P_CLK_MUX_STAT0_MUXD0_SHIFT (20)
-#define S5P_CLK_MUX_STAT0_MUXD0_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXD0_SHIFT)
-#define S5P_CLK_MUX_STAT0_MUXD1_SHIFT (24)
-#define S5P_CLK_MUX_STAT0_MUXD1_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXD1_SHIFT)
-#define S5P_CLK_MUX_STAT1_D1SYNC_SHIFT (24)
-#define S5P_CLK_MUX_STAT1_D1SYNC_MASK (0x7 << S5P_CLK_MUX_STAT1_D1SYNC_SHIFT)
-#define S5P_CLK_MUX_STAT1_D0SYNC_SHIFT (28)
-#define S5P_CLK_MUX_STAT1_D0SYNC_MASK (0x7 << S5P_CLK_MUX_STAT1_D0SYNC_SHIFT)
-
-#endif /* __ASM_ARCH_REGS_CLOCK_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/regs-irq.h b/arch/arm/mach-s5p6442/include/mach/regs-irq.h
deleted file mode 100644
index 73782b52a83b..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/regs-irq.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/regs-irq.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - IRQ register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_IRQ_H
-#define __ASM_ARCH_REGS_IRQ_H __FILE__
-
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-
-#endif /* __ASM_ARCH_REGS_IRQ_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/spi-clocks.h b/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
deleted file mode 100644
index 7fd88205a97c..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __S5P6442_PLAT_SPI_CLKS_H
-#define __S5P6442_PLAT_SPI_CLKS_H __FILE__
-
-#define S5P6442_SPI_SRCCLK_PCLK 0
-#define S5P6442_SPI_SRCCLK_SCLK 1
-
-#endif /* __S5P6442_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/system.h b/arch/arm/mach-s5p6442/include/mach/system.h
deleted file mode 100644
index c30c1cc1b97e..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/system.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/system.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - system support header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_SYSTEM_H
-#define __ASM_ARCH_SYSTEM_H __FILE__
-
-#include <plat/system-reset.h>
-
-static void arch_idle(void)
-{
- /* nothing here yet */
-}
-
-#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/tick.h b/arch/arm/mach-s5p6442/include/mach/tick.h
deleted file mode 100644
index e1d4cabf8297..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/tick.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/tick.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/mach-s3c6400/include/mach/tick.h
- *
- * S5P6442 - Timer tick support definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TICK_H
-#define __ASM_ARCH_TICK_H __FILE__
-
-static inline u32 s3c24xx_ostimer_pending(void)
-{
- u32 pend = __raw_readl(VA_VIC0 + VIC_RAW_STATUS);
- return pend & (1 << (IRQ_TIMER4_VIC - S5P_IRQ_VIC0(0)));
-}
-
-#define TICK_MAX (0xffffffff)
-
-#endif /* __ASM_ARCH_TICK_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/timex.h b/arch/arm/mach-s5p6442/include/mach/timex.h
deleted file mode 100644
index ff8f2fcadeb7..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/timex.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* arch/arm/mach-s5p6442/include/mach/timex.h
- *
- * Copyright (c) 2003-2010 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S5P6442 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/uncompress.h b/arch/arm/mach-s5p6442/include/mach/uncompress.h
deleted file mode 100644
index 5ac7cbeeb987..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/uncompress.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/uncompress.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - uncompress code
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_UNCOMPRESS_H
-#define __ASM_ARCH_UNCOMPRESS_H
-
-#include <mach/map.h>
-#include <plat/uncompress.h>
-
-static void arch_detect_cpu(void)
-{
- /* we do not need to do any cpu detection here at the moment. */
-}
-
-#endif /* __ASM_ARCH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/vmalloc.h b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
deleted file mode 100644
index 4aa55e55ac47..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S5P6442 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p6442/init.c b/arch/arm/mach-s5p6442/init.c
deleted file mode 100644
index 1874bdb71e1d..000000000000
--- a/arch/arm/mach-s5p6442/init.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/s5p6442-init.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5p6442.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc s5p6442_serial_clocks[] = {
- [0] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
-};
-
-/* uart registration process */
-void __init s5p6442_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- struct s3c2410_uartcfg *tcfg = cfg;
- u32 ucnt;
-
- for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
- if (!tcfg->clocks) {
- tcfg->clocks = s5p6442_serial_clocks;
- tcfg->clocks_size = ARRAY_SIZE(s5p6442_serial_clocks);
- }
- }
-
- s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5p6442/mach-smdk6442.c b/arch/arm/mach-s5p6442/mach-smdk6442.c
deleted file mode 100644
index eaf6b9c489ff..000000000000
--- a/arch/arm/mach-s5p6442/mach-smdk6442.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/mach-smdk6442.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/i2c.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-
-#include <plat/regs-serial.h>
-#include <plat/s5p6442.h>
-#include <plat/devs.h>
-#include <plat/cpu.h>
-#include <plat/iic.h>
-
-/* Following are default values for UCON, ULCON and UFCON UART registers */
-#define SMDK6442_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
- S3C2410_UCON_RXILEVEL | \
- S3C2410_UCON_TXIRQMODE | \
- S3C2410_UCON_RXIRQMODE | \
- S3C2410_UCON_RXFIFO_TOI | \
- S3C2443_UCON_RXERR_IRQEN)
-
-#define SMDK6442_ULCON_DEFAULT S3C2410_LCON_CS8
-
-#define SMDK6442_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
- S5PV210_UFCON_TXTRIG4 | \
- S5PV210_UFCON_RXTRIG4)
-
-static struct s3c2410_uartcfg smdk6442_uartcfgs[] __initdata = {
- [0] = {
- .hwport = 0,
- .flags = 0,
- .ucon = SMDK6442_UCON_DEFAULT,
- .ulcon = SMDK6442_ULCON_DEFAULT,
- .ufcon = SMDK6442_UFCON_DEFAULT,
- },
- [1] = {
- .hwport = 1,
- .flags = 0,
- .ucon = SMDK6442_UCON_DEFAULT,
- .ulcon = SMDK6442_ULCON_DEFAULT,
- .ufcon = SMDK6442_UFCON_DEFAULT,
- },
- [2] = {
- .hwport = 2,
- .flags = 0,
- .ucon = SMDK6442_UCON_DEFAULT,
- .ulcon = SMDK6442_ULCON_DEFAULT,
- .ufcon = SMDK6442_UFCON_DEFAULT,
- },
-};
-
-static struct platform_device *smdk6442_devices[] __initdata = {
- &s3c_device_i2c0,
- &samsung_asoc_dma,
- &s5p6442_device_iis0,
- &s3c_device_wdt,
-};
-
-static struct i2c_board_info smdk6442_i2c_devs0[] __initdata = {
- { I2C_BOARD_INFO("wm8580", 0x1b), },
-};
-
-static void __init smdk6442_map_io(void)
-{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
- s3c24xx_init_clocks(12000000);
- s3c24xx_init_uarts(smdk6442_uartcfgs, ARRAY_SIZE(smdk6442_uartcfgs));
-}
-
-static void __init smdk6442_machine_init(void)
-{
- s3c_i2c0_set_platdata(NULL);
- i2c_register_board_info(0, smdk6442_i2c_devs0,
- ARRAY_SIZE(smdk6442_i2c_devs0));
- platform_add_devices(smdk6442_devices, ARRAY_SIZE(smdk6442_devices));
-}
-
-MACHINE_START(SMDK6442, "SMDK6442")
- /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
- .boot_params = S5P_PA_SDRAM + 0x100,
- .init_irq = s5p6442_init_irq,
- .map_io = smdk6442_map_io,
- .init_machine = smdk6442_machine_init,
- .timer = &s3c24xx_timer,
-MACHINE_END
diff --git a/arch/arm/mach-s5p6442/setup-i2c0.c b/arch/arm/mach-s5p6442/setup-i2c0.c
deleted file mode 100644
index aad85656b0cc..000000000000
--- a/arch/arm/mach-s5p6442/setup-i2c0.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/setup-i2c0.c
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * I2C0 GPIO configuration.
- *
- * Based on plat-s3c64xx/setup-i2c0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/gpio.h>
-
-struct platform_device; /* don't need the contents */
-
-#include <plat/gpio-cfg.h>
-#include <plat/iic.h>
-
-void s3c_i2c0_cfg_gpio(struct platform_device *dev)
-{
- s3c_gpio_cfgall_range(S5P6442_GPD1(0), 2,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
-}
diff --git a/arch/arm/mach-s5pc100/Makefile b/arch/arm/mach-s5pc100/Makefile
index eecab57d2e5d..a5e6e608b498 100644
--- a/arch/arm/mach-s5pc100/Makefile
+++ b/arch/arm/mach-s5pc100/Makefile
@@ -11,7 +11,7 @@ obj- :=
# Core support for S5PC100 system
-obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o gpiolib.o
+obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o
obj-$(CONFIG_CPU_S5PC100) += setup-i2c0.o
obj-$(CONFIG_CPU_S5PC100) += dma.o
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 11f17907b4e8..50907aca006c 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -12,7 +12,7 @@ obj- :=
# Core support for S5PV210 system
-obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o gpiolib.o
+obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o
obj-$(CONFIG_CPU_S5PV210) += setup-i2c0.o
obj-$(CONFIG_S5PV210_PM) += pm.o sleep.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile
index fab46fe9a71f..8fd354aaf0a7 100644
--- a/arch/arm/mach-u300/Makefile
+++ b/arch/arm/mach-u300/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel, U300 machine.
#
-obj-y := core.o clock.o timer.o gpio.o padmux.o
+obj-y := core.o clock.o timer.o padmux.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 54429d015954..f8b9392ee347 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -5,7 +5,6 @@ config UX500_SOC_COMMON
default y
select ARM_GIC
select HAS_MTU
- select NOMADIK_GPIO
select ARM_ERRATA_753970
menu "Ux500 SoC"
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index bf0b02414e5b..7c6cb4fa47a9 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -99,8 +99,11 @@ static void sdi0_configure(void)
gpio_direction_output(sdi0_vsel, 0);
gpio_direction_output(sdi0_en, 1);
- /* Add the device */
- db8500_add_sdi0(&mop500_sdi0_data);
+ /* Add the device, force v2 to subrevision 1 */
+ if (cpu_is_u8500v2())
+ db8500_add_sdi0(&mop500_sdi0_data, 0x10480180);
+ else
+ db8500_add_sdi0(&mop500_sdi0_data, 0);
}
void mop500_sdi_tc35892_init(void)
@@ -188,13 +191,18 @@ static struct mmci_platform_data mop500_sdi4_data = {
void __init mop500_sdi_init(void)
{
+ u32 periphid = 0;
+
+ /* v2 has a new version of this block that needs to be forced */
+ if (cpu_is_u8500v2())
+ periphid = 0x10480180;
/* PoP:ed eMMC on top of DB8500 v1.0 has problems with high speed */
if (!cpu_is_u8500v10())
mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
- db8500_add_sdi2(&mop500_sdi2_data);
+ db8500_add_sdi2(&mop500_sdi2_data, periphid);
/* On-board eMMC */
- db8500_add_sdi4(&mop500_sdi4_data);
+ db8500_add_sdi4(&mop500_sdi4_data, periphid);
if (machine_is_hrefv60()) {
mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index c719b5a1d913..7825705033bf 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -28,18 +28,20 @@ dbx500_add_msp_spi(const char *name, resource_size_t base, int irq,
static inline struct amba_device *
dbx500_add_spi(const char *name, resource_size_t base, int irq,
- struct spi_master_cntlr *pdata)
+ struct spi_master_cntlr *pdata,
+ u32 periphid)
{
- return dbx500_add_amba_device(name, base, irq, pdata, 0);
+ return dbx500_add_amba_device(name, base, irq, pdata, periphid);
}
struct mmci_platform_data;
static inline struct amba_device *
dbx500_add_sdi(const char *name, resource_size_t base, int irq,
- struct mmci_platform_data *pdata)
+ struct mmci_platform_data *pdata,
+ u32 periphid)
{
- return dbx500_add_amba_device(name, base, irq, pdata, 0);
+ return dbx500_add_amba_device(name, base, irq, pdata, periphid);
}
struct amba_pl011_data;
diff --git a/arch/arm/mach-ux500/devices-db5500.h b/arch/arm/mach-ux500/devices-db5500.h
index 94627f7783b0..0c4bccd02b90 100644
--- a/arch/arm/mach-ux500/devices-db5500.h
+++ b/arch/arm/mach-ux500/devices-db5500.h
@@ -38,24 +38,34 @@
ux500_add_usb(U5500_USBOTG_BASE, IRQ_DB5500_USBOTG, rx_cfg, tx_cfg)
#define db5500_add_sdi0(pdata) \
- dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata)
+ dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata, \
+ 0x10480180)
#define db5500_add_sdi1(pdata) \
- dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata)
+ dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata, \
+ 0x10480180)
#define db5500_add_sdi2(pdata) \
- dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata)
+ dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata \
+ 0x10480180)
#define db5500_add_sdi3(pdata) \
- dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata)
+ dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata \
+ 0x10480180)
#define db5500_add_sdi4(pdata) \
- dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata)
+ dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata \
+ 0x10480180)
+/* This one has a bad peripheral ID in the U5500 silicon */
#define db5500_add_spi0(pdata) \
- dbx500_add_spi("spi0", U5500_SPI0_BASE, IRQ_DB5500_SPI0, pdata)
+ dbx500_add_spi("spi0", U5500_SPI0_BASE, IRQ_DB5500_SPI0, pdata, \
+ 0x10080023)
#define db5500_add_spi1(pdata) \
- dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata)
+ dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata, \
+ 0x10080023)
#define db5500_add_spi2(pdata) \
- dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata)
+ dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata \
+ 0x10080023)
#define db5500_add_spi3(pdata) \
- dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata)
+ dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata \
+ 0x10080023)
#define db5500_add_uart0(plat) \
dbx500_add_uart("uart0", U5500_UART0_BASE, IRQ_DB5500_UART0, plat)
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 9cc6f8f5d3e6..cbd4a9ae8109 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -25,7 +25,7 @@ static inline struct amba_device *
db8500_add_ssp(const char *name, resource_size_t base, int irq,
struct pl022_ssp_controller *pdata)
{
- return dbx500_add_amba_device(name, base, irq, pdata, SSP_PER_ID);
+ return dbx500_add_amba_device(name, base, irq, pdata, 0);
}
@@ -64,18 +64,18 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
#define db8500_add_usb(rx_cfg, tx_cfg) \
ux500_add_usb(U8500_USBOTG_BASE, IRQ_DB8500_USBOTG, rx_cfg, tx_cfg)
-#define db8500_add_sdi0(pdata) \
- dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata)
-#define db8500_add_sdi1(pdata) \
- dbx500_add_sdi("sdi1", U8500_SDI1_BASE, IRQ_DB8500_SDMMC1, pdata)
-#define db8500_add_sdi2(pdata) \
- dbx500_add_sdi("sdi2", U8500_SDI2_BASE, IRQ_DB8500_SDMMC2, pdata)
-#define db8500_add_sdi3(pdata) \
- dbx500_add_sdi("sdi3", U8500_SDI3_BASE, IRQ_DB8500_SDMMC3, pdata)
-#define db8500_add_sdi4(pdata) \
- dbx500_add_sdi("sdi4", U8500_SDI4_BASE, IRQ_DB8500_SDMMC4, pdata)
-#define db8500_add_sdi5(pdata) \
- dbx500_add_sdi("sdi5", U8500_SDI5_BASE, IRQ_DB8500_SDMMC5, pdata)
+#define db8500_add_sdi0(pdata, pid) \
+ dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata, pid)
+#define db8500_add_sdi1(pdata, pid) \
+ dbx500_add_sdi("sdi1", U8500_SDI1_BASE, IRQ_DB8500_SDMMC1, pdata, pid)
+#define db8500_add_sdi2(pdata, pid) \
+ dbx500_add_sdi("sdi2", U8500_SDI2_BASE, IRQ_DB8500_SDMMC2, pdata, pid)
+#define db8500_add_sdi3(pdata, pid) \
+ dbx500_add_sdi("sdi3", U8500_SDI3_BASE, IRQ_DB8500_SDMMC3, pdata, pid)
+#define db8500_add_sdi4(pdata, pid) \
+ dbx500_add_sdi("sdi4", U8500_SDI4_BASE, IRQ_DB8500_SDMMC4, pdata, pid)
+#define db8500_add_sdi5(pdata, pid) \
+ dbx500_add_sdi("sdi5", U8500_SDI5_BASE, IRQ_DB8500_SDMMC5, pdata, pid)
#define db8500_add_ssp0(pdata) \
db8500_add_ssp("ssp0", U8500_SSP0_BASE, IRQ_DB8500_SSP0, pdata)
@@ -83,13 +83,13 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
db8500_add_ssp("ssp1", U8500_SSP1_BASE, IRQ_DB8500_SSP1, pdata)
#define db8500_add_spi0(pdata) \
- dbx500_add_spi("spi0", U8500_SPI0_BASE, IRQ_DB8500_SPI0, pdata)
+ dbx500_add_spi("spi0", U8500_SPI0_BASE, IRQ_DB8500_SPI0, pdata, 0)
#define db8500_add_spi1(pdata) \
- dbx500_add_spi("spi1", U8500_SPI1_BASE, IRQ_DB8500_SPI1, pdata)
+ dbx500_add_spi("spi1", U8500_SPI1_BASE, IRQ_DB8500_SPI1, pdata, 0)
#define db8500_add_spi2(pdata) \
- dbx500_add_spi("spi2", U8500_SPI2_BASE, IRQ_DB8500_SPI2, pdata)
+ dbx500_add_spi("spi2", U8500_SPI2_BASE, IRQ_DB8500_SPI2, pdata, 0)
#define db8500_add_spi3(pdata) \
- dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata)
+ dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata, 0)
#define db8500_add_uart0(pdata) \
dbx500_add_uart("uart0", U8500_UART0_BASE, IRQ_DB8500_UART0, pdata)
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 2c6f71049f2e..470ac52663d6 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -29,9 +29,6 @@
#include <mach/db8500-regs.h>
#include <mach/db5500-regs.h>
-/* ST-Ericsson modified pl022 id */
-#define SSP_PER_ID 0x01080022
-
#ifndef __ASSEMBLY__
#include <mach/id.h>
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index c96fa1b3f49f..73b4a8b66a57 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
*/
ENTRY(v6_flush_kern_dcache_area)
add r1, r0, r1
+ bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index dc18d81ef8ce..d32f02b61866 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
ENTRY(v7_flush_kern_dcache_area)
dcache_line_size r2, r3
add r1, r0, r1
+ sub r3, r2, #1
+ bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfab..8bfae964b133 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,9 +24,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
/*
* We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
*/
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -36,8 +34,11 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
- /* set the reserved ASID before flushing the TLB */
- asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+ u32 ttb;
+ /* Copy TTBR1 into TTBR0 */
+ asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
+ "mcr p15, 0, %0, c2, c0, 0"
+ : "=r" (ttb));
isb();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
@@ -93,7 +94,7 @@ static void reset_context(void *info)
return;
smp_rmb();
- asid = cpu_last_asid + cpu + 1;
+ asid = cpu_last_asid + cpu;
flush_context();
set_mm_context(mm, asid);
@@ -143,13 +144,13 @@ void __new_context(struct mm_struct *mm)
* to start a new version and flush the TLB.
*/
if (unlikely((asid & ~ASID_MASK) == 0)) {
- asid = cpu_last_asid + smp_processor_id() + 1;
+ asid = cpu_last_asid + smp_processor_id();
flush_context();
#ifdef CONFIG_SMP
smp_wmb();
smp_call_function(reset_context, NULL, 1);
#endif
- cpu_last_asid += NR_CPUS;
+ cpu_last_asid += NR_CPUS - 1;
}
set_mm_context(mm, asid);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3f17ea146f0e..2c2cce9cd8c8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,12 +15,14 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
+#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <asm/mach-types.h>
+#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -71,6 +73,14 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+{
+ phys_initrd_start = start;
+ phys_initrd_size = end - start;
+}
+#endif /* CONFIG_OF_FLATTREE */
+
/*
* This keeps memory configuration data used by a couple memory
* initialization functions, as well as show_mem() for the skipping
@@ -273,13 +283,15 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
free_area_init_node(0, zone_size, min, zhole_size);
}
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
+#endif
+#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
@@ -334,6 +346,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
#endif
arm_mm_memblock_reserve();
+ arm_dt_memblock_reserve();
/* reserve any platform specific memblock areas */
if (mdesc->reserve)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d2384106af9c..5b3d7d543659 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -5,14 +5,9 @@ extern pmd_t *top_pmd;
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
-static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
-{
- return pmd_offset(pud_offset(pgd, virt), virt);
-}
-
static inline pmd_t *pmd_off_k(unsigned long virt)
{
- return pmd_off(pgd_offset_k(virt), virt);
+ return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
struct mem_type {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 08a92368d9d3..9d9e736c2b4f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -763,15 +763,12 @@ static void __init sanity_check_meminfo(void)
{
int i, j, highmem = 0;
- lowmem_limit = __pa(vmalloc_min - 1) + 1;
- memblock_set_current_limit(lowmem_limit);
-
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
#ifdef CONFIG_HIGHMEM
- if (__va(bank->start) > vmalloc_min ||
+ if (__va(bank->start) >= vmalloc_min ||
__va(bank->start) < (void *)PAGE_OFFSET)
highmem = 1;
@@ -829,6 +826,9 @@ static void __init sanity_check_meminfo(void)
bank->size = newsize;
}
#endif
+ if (!bank->highmem && bank->start + bank->size > lowmem_limit)
+ lowmem_limit = bank->start + bank->size;
+
j++;
}
#ifdef CONFIG_HIGHMEM
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
}
#endif
meminfo.nr_banks = j;
+ memblock_set_current_limit(lowmem_limit);
}
static inline void prepare_page_table(void)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ab17cc0d3fa7..1d2b8451bf25 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -213,7 +213,9 @@ __v6_setup:
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
- mcr p15, 0, r4, c2, c0, 1 @ load TTB1
+ ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
+ ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
+ mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
adr r5, v6_crval
ldmia r5, {r5, r6}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index babfba09c89f..b3b566ec83d3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,18 +108,16 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
-#ifdef CONFIG_ARM_ERRATA_754322
- dsb
-#endif
- mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
- isb
-1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
+ mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
isb
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
+ mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
@@ -368,7 +366,9 @@ __v7_setup:
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
- mcr p15, 0, r4, c2, c0, 1 @ load TTB1
+ ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
+ ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
+ mcr p15, 0, r8, c2, c0, 1 @ load TTB1
ldr r5, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
diff --git a/arch/arm/plat-nomadik/Kconfig b/arch/arm/plat-nomadik/Kconfig
index 18296ee68802..ce659015535e 100644
--- a/arch/arm/plat-nomadik/Kconfig
+++ b/arch/arm/plat-nomadik/Kconfig
@@ -21,9 +21,4 @@ config HAS_MTU
to multiple interrupt generating programmable
32-bit free running decrementing counters.
-config NOMADIK_GPIO
- bool
- help
- Support for the Nomadik GPIO controller.
-
endif
diff --git a/arch/arm/plat-nomadik/Makefile b/arch/arm/plat-nomadik/Makefile
index c33547361bd7..37c7cdd0f8f0 100644
--- a/arch/arm/plat-nomadik/Makefile
+++ b/arch/arm/plat-nomadik/Makefile
@@ -3,4 +3,3 @@
# Licensed under GPLv2
obj-$(CONFIG_HAS_MTU) += timer.o
-obj-$(CONFIG_NOMADIK_GPIO) += gpio.o
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index 1b9f6f0843d1..ea19a5b2f227 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -78,6 +78,8 @@ extern int nmk_gpio_get_mode(int gpio);
extern void nmk_gpio_wakeups_suspend(void);
extern void nmk_gpio_wakeups_resume(void);
+extern void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up);
+
/*
* Platform data to register a block: only the initial gpio/irq number.
*/
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index a4a12859fdd5..f0233e6abcdf 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := common.o sram.o clock.o devices.o dma.o mux.o gpio.o \
+obj-y := common.o sram.o clock.o devices.o dma.o mux.o \
usb.o fb.o io.o counter_32k.o
obj-m :=
obj-n :=
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index cac2e8ac6968..ec97e00cb581 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -52,6 +52,109 @@
#define OMAP34XX_NR_GPIOS 6
+/*
+ * OMAP1510 GPIO registers
+ */
+#define OMAP1510_GPIO_DATA_INPUT 0x00
+#define OMAP1510_GPIO_DATA_OUTPUT 0x04
+#define OMAP1510_GPIO_DIR_CONTROL 0x08
+#define OMAP1510_GPIO_INT_CONTROL 0x0c
+#define OMAP1510_GPIO_INT_MASK 0x10
+#define OMAP1510_GPIO_INT_STATUS 0x14
+#define OMAP1510_GPIO_PIN_CONTROL 0x18
+
+#define OMAP1510_IH_GPIO_BASE 64
+
+/*
+ * OMAP1610 specific GPIO registers
+ */
+#define OMAP1610_GPIO_REVISION 0x0000
+#define OMAP1610_GPIO_SYSCONFIG 0x0010
+#define OMAP1610_GPIO_SYSSTATUS 0x0014
+#define OMAP1610_GPIO_IRQSTATUS1 0x0018
+#define OMAP1610_GPIO_IRQENABLE1 0x001c
+#define OMAP1610_GPIO_WAKEUPENABLE 0x0028
+#define OMAP1610_GPIO_DATAIN 0x002c
+#define OMAP1610_GPIO_DATAOUT 0x0030
+#define OMAP1610_GPIO_DIRECTION 0x0034
+#define OMAP1610_GPIO_EDGE_CTRL1 0x0038
+#define OMAP1610_GPIO_EDGE_CTRL2 0x003c
+#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c
+#define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8
+#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0
+#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc
+#define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8
+#define OMAP1610_GPIO_SET_DATAOUT 0x00f0
+
+/*
+ * OMAP7XX specific GPIO registers
+ */
+#define OMAP7XX_GPIO_DATA_INPUT 0x00
+#define OMAP7XX_GPIO_DATA_OUTPUT 0x04
+#define OMAP7XX_GPIO_DIR_CONTROL 0x08
+#define OMAP7XX_GPIO_INT_CONTROL 0x0c
+#define OMAP7XX_GPIO_INT_MASK 0x10
+#define OMAP7XX_GPIO_INT_STATUS 0x14
+
+/*
+ * omap2+ specific GPIO registers
+ */
+#define OMAP24XX_GPIO_REVISION 0x0000
+#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
+#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
+#define OMAP24XX_GPIO_IRQENABLE2 0x002c
+#define OMAP24XX_GPIO_IRQENABLE1 0x001c
+#define OMAP24XX_GPIO_WAKE_EN 0x0020
+#define OMAP24XX_GPIO_CTRL 0x0030
+#define OMAP24XX_GPIO_OE 0x0034
+#define OMAP24XX_GPIO_DATAIN 0x0038
+#define OMAP24XX_GPIO_DATAOUT 0x003c
+#define OMAP24XX_GPIO_LEVELDETECT0 0x0040
+#define OMAP24XX_GPIO_LEVELDETECT1 0x0044
+#define OMAP24XX_GPIO_RISINGDETECT 0x0048
+#define OMAP24XX_GPIO_FALLINGDETECT 0x004c
+#define OMAP24XX_GPIO_DEBOUNCE_EN 0x0050
+#define OMAP24XX_GPIO_DEBOUNCE_VAL 0x0054
+#define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060
+#define OMAP24XX_GPIO_SETIRQENABLE1 0x0064
+#define OMAP24XX_GPIO_CLEARWKUENA 0x0080
+#define OMAP24XX_GPIO_SETWKUENA 0x0084
+#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090
+#define OMAP24XX_GPIO_SETDATAOUT 0x0094
+
+#define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_EOI 0x0020
+#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
+#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
+#define OMAP4_GPIO_IRQSTATUS0 0x002c
+#define OMAP4_GPIO_IRQSTATUS1 0x0030
+#define OMAP4_GPIO_IRQSTATUSSET0 0x0034
+#define OMAP4_GPIO_IRQSTATUSSET1 0x0038
+#define OMAP4_GPIO_IRQSTATUSCLR0 0x003c
+#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040
+#define OMAP4_GPIO_IRQWAKEN0 0x0044
+#define OMAP4_GPIO_IRQWAKEN1 0x0048
+#define OMAP4_GPIO_IRQENABLE1 0x011c
+#define OMAP4_GPIO_WAKE_EN 0x0120
+#define OMAP4_GPIO_IRQSTATUS2 0x0128
+#define OMAP4_GPIO_IRQENABLE2 0x012c
+#define OMAP4_GPIO_CTRL 0x0130
+#define OMAP4_GPIO_OE 0x0134
+#define OMAP4_GPIO_DATAIN 0x0138
+#define OMAP4_GPIO_DATAOUT 0x013c
+#define OMAP4_GPIO_LEVELDETECT0 0x0140
+#define OMAP4_GPIO_LEVELDETECT1 0x0144
+#define OMAP4_GPIO_RISINGDETECT 0x0148
+#define OMAP4_GPIO_FALLINGDETECT 0x014c
+#define OMAP4_GPIO_DEBOUNCENABLE 0x0150
+#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154
+#define OMAP4_GPIO_CLEARIRQENABLE1 0x0160
+#define OMAP4_GPIO_SETIRQENABLE1 0x0164
+#define OMAP4_GPIO_CLEARWKUENA 0x0180
+#define OMAP4_GPIO_SETWKUENA 0x0184
+#define OMAP4_GPIO_CLEARDATAOUT 0x0190
+#define OMAP4_GPIO_SETDATAOUT 0x0194
+
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 6751bcf7b888..e98f5c5c7879 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -7,7 +7,7 @@
config PLAT_S5P
bool
- depends on (ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4)
+ depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4)
default y
select ARM_VIC if !ARCH_EXYNOS4
select ARM_GIC if ARCH_EXYNOS4
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index 5cf5e721e6ca..bbc2aa7449ca 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -21,7 +21,6 @@
#include <plat/cpu.h>
#include <plat/s5p6440.h>
-#include <plat/s5p6442.h>
#include <plat/s5p6450.h>
#include <plat/s5pc100.h>
#include <plat/s5pv210.h>
@@ -30,7 +29,6 @@
/* table of supported CPUs */
static const char name_s5p6440[] = "S5P6440";
-static const char name_s5p6442[] = "S5P6442";
static const char name_s5p6450[] = "S5P6450";
static const char name_s5pc100[] = "S5PC100";
static const char name_s5pv210[] = "S5PV210/S5PC110";
@@ -46,14 +44,6 @@ static struct cpu_table cpu_ids[] __initdata = {
.init = s5p64x0_init,
.name = name_s5p6440,
}, {
- .idcode = 0x36442000,
- .idmask = 0xfffff000,
- .map_io = s5p6442_map_io,
- .init_clocks = s5p6442_init_clocks,
- .init_uarts = s5p6442_init_uarts,
- .init = s5p6442_init,
- .name = name_s5p6442,
- }, {
.idcode = 0x36450000,
.idmask = 0xfffff000,
.map_io = s5p6450_map_io,
diff --git a/arch/arm/plat-s5p/include/plat/s5p6442.h b/arch/arm/plat-s5p/include/plat/s5p6442.h
deleted file mode 100644
index 7b8801349c94..000000000000
--- a/arch/arm/plat-s5p/include/plat/s5p6442.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* arch/arm/plat-s5p/include/plat/s5p6442.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Header file for s5p6442 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5P6442 related SoCs */
-
-extern void s5p6442_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5p6442_register_clocks(void);
-extern void s5p6442_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5P6442
-
-extern int s5p6442_init(void);
-extern void s5p6442_init_irq(void);
-extern void s5p6442_map_io(void);
-extern void s5p6442_init_clocks(int xtal);
-
-#define s5p6442_init_uarts s5p6442_common_init_uarts
-
-#else
-#define s5p6442_init_clocks NULL
-#define s5p6442_init_uarts NULL
-#define s5p6442_map_io NULL
-#define s5p6442_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index e9de58a2e294..53eb15b0a07d 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -19,7 +19,6 @@ obj-y += gpio.o
obj-y += gpio-config.o
obj-y += dev-asocdma.o
-obj-$(CONFIG_SAMSUNG_GPIOLIB_4BIT) += gpiolib.o
obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
obj-$(CONFIG_SAMSUNG_IRQ_UART) += irq-uart.o
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 3aedac0034ba..c0a5741b23e6 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -86,7 +86,6 @@ extern struct sysdev_class s3c2443_sysclass;
extern struct sysdev_class s3c6410_sysclass;
extern struct sysdev_class s3c64xx_sysclass;
extern struct sysdev_class s5p64x0_sysclass;
-extern struct sysdev_class s5p6442_sysclass;
extern struct sysdev_class s5pv210_sysclass;
extern struct sysdev_class exynos4_sysclass;
diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
index dc6efd90e8ff..207e275362a8 100644
--- a/arch/arm/plat-samsung/include/plat/debug-macro.S
+++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
@@ -11,7 +11,7 @@
#include <plat/regs-serial.h>
-/* The S5PV210/S5PC110 and S5P6442 implementations are as belows. */
+/* The S5PV210/S5PC110 implementations are as below. */
.macro fifo_level_s5pv210 rd, rx
ldr \rd, [ \rx, # S3C2410_UFSTAT ]
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 39818d8da420..b61b8ee7cc52 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -111,12 +111,6 @@ extern struct platform_device exynos4_device_spdif;
extern struct platform_device exynos4_device_pd[];
extern struct platform_device exynos4_device_ahci;
-extern struct platform_device s5p6442_device_pcm0;
-extern struct platform_device s5p6442_device_pcm1;
-extern struct platform_device s5p6442_device_iis0;
-extern struct platform_device s5p6442_device_iis1;
-extern struct platform_device s5p6442_device_spi;
-
extern struct platform_device s5p6440_device_pcm;
extern struct platform_device s5p6440_device_iis;
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 788837e99cb3..c151c5f94a87 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -194,7 +194,7 @@
#define S3C64XX_UINTSP 0x34
#define S3C64XX_UINTM 0x38
-/* Following are specific to S5PV210 and S5P6442 */
+/* Following are specific to S5PV210 */
#define S5PV210_UCON_CLKMASK (1<<10)
#define S5PV210_UCON_PCLK (0<<10)
#define S5PV210_UCON_UCLK (1<<10)
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index ff1a561b326e..0ffe34a21554 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -69,6 +69,5 @@ extern void s3c64xx_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
extern void s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
extern void s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
extern void s5p64x0_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
-extern void s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
#endif /* __S3C64XX_PLAT_SPI_H */
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
index 89861a27543e..f714544e5560 100644
--- a/arch/avr32/include/asm/unistd.h
+++ b/arch/avr32/include/asm/unistd.h
@@ -299,9 +299,10 @@
#define __NR_signalfd 279
/* 280 was __NR_timerfd */
#define __NR_eventfd 281
+#define __NR_setns 283
#ifdef __KERNEL__
-#define NR_syscalls 282
+#define NR_syscalls 284
/* Old stuff */
#define __IGNORE_uselib
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index e76bad16b0f0..c7fd394d28a4 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -296,4 +296,5 @@ sys_call_table:
.long sys_ni_syscall /* 280, was sys_timerfd */
.long sys_eventfd
.long sys_recvmmsg
+ .long sys_setns
.long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index bfc9d071db9b..aa677e2a3823 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1014,6 +1014,7 @@ static struct platform_device *__initdata at32_usarts[4];
void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (hw_id) {
case 0:
@@ -1042,7 +1043,8 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
data->regs = (void __iomem *)pdev->resource[0].start;
}
- pdev->id = line;
+ pdata = pdev->dev.platform_data;
+ pdata->num = line;
at32_usarts[line] = pdev;
}
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 61740201b311..679458d9a622 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -33,6 +33,7 @@ extern struct platform_device *atmel_default_console_device;
#define ATMEL_USART_CLK 0x04
struct atmel_uart_data {
+ int num; /* port num */
short use_dma_tx; /* use transmit DMA? */
short use_dma_rx; /* use receive DMA? */
void __iomem *regs; /* virtual base address, if any */
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 7dbc664eab1e..7fd0ec7b5b0f 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -184,7 +184,7 @@ struct bfin_uart_regs {
#undef __BFP
#ifndef port_membase
-# define port_membase(p) (((struct bfin_serial_port *)(p))->port.membase)
+# define port_membase(p) 0
#endif
#define UART_GET_CHAR(p) bfin_read16(port_membase(p) + OFFSET_RBR)
@@ -235,10 +235,10 @@ struct bfin_uart_regs {
#define UART_SET_DLAB(p) do { UART_PUT_LCR(p, UART_GET_LCR(p) | DLAB); SSYNC(); } while (0)
#ifndef put_lsr_cache
-# define put_lsr_cache(p, v) (((struct bfin_serial_port *)(p))->lsr = (v))
+# define put_lsr_cache(p, v)
#endif
#ifndef get_lsr_cache
-# define get_lsr_cache(p) (((struct bfin_serial_port *)(p))->lsr)
+# define get_lsr_cache(p) 0
#endif
/* The hardware clears the LSR bits upon read, so we need to cache
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index c722acdda0d3..38657dac1235 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -193,4 +193,22 @@ uint16_t get_enabled_gptimers(void);
uint32_t get_gptimer_status(unsigned int group);
void set_gptimer_status(unsigned int group, uint32_t value);
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits. So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/*
+ * bfin timer registers layout
+ */
+struct bfin_gptimer_regs {
+ __BFP(config);
+ u32 counter;
+ u32 period;
+ u32 width;
+};
+
+#undef __BFP
+
#endif
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index ff9a9f35d50b..0ccba60b9ccf 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -397,8 +397,10 @@
#define __NR_open_by_handle_at 376
#define __NR_clock_adjtime 377
#define __NR_syncfs 378
+#define __NR_setns 379
+#define __NR_sendmmsg 380
-#define __NR_syscall 379
+#define __NR_syscall 381
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index 94b1d8a0256a..fce4807ceef9 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -13,6 +13,7 @@
#include <asm/blackfin.h>
#include <asm/gpio.h>
+#include <asm/gptimers.h>
#include <asm/bfin_can.h>
#include <asm/bfin_dma.h>
#include <asm/bfin_ppi.h>
@@ -230,8 +231,8 @@ bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdm
#define DMA(num) _DMA(num, DMA##num##_NEXT_DESC_PTR, 0, "")
#define _MDMA(num, x) \
do { \
- _DMA(num, x##DMA_D##num##_CONFIG, 'D', #x); \
- _DMA(num, x##DMA_S##num##_CONFIG, 'S', #x); \
+ _DMA(num, x##DMA_D##num##_NEXT_DESC_PTR, 'D', #x); \
+ _DMA(num, x##DMA_S##num##_NEXT_DESC_PTR, 'S', #x); \
} while (0)
#define MDMA(num) _MDMA(num, M)
#define IMDMA(num) _MDMA(num, IM)
@@ -264,20 +265,15 @@ bfin_debug_mmrs_eppi(struct dentry *parent, unsigned long base, int num)
/*
* General Purpose Timers
*/
-#define GPTIMER_OFF(mmr) (TIMER0_##mmr - TIMER0_CONFIG)
-#define __GPTIMER(name) \
- do { \
- strcpy(_buf, #name); \
- debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, (u16 *)(base + GPTIMER_OFF(name))); \
- } while (0)
+#define __GPTIMER(uname, lname) __REGS(gptimer, #uname, lname)
static void __init __maybe_unused
bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
{
char buf[32], *_buf = REGS_STR_PFX(buf, TIMER, num);
- __GPTIMER(CONFIG);
- __GPTIMER(COUNTER);
- __GPTIMER(PERIOD);
- __GPTIMER(WIDTH);
+ __GPTIMER(CONFIG, config);
+ __GPTIMER(COUNTER, counter);
+ __GPTIMER(PERIOD, period);
+ __GPTIMER(WIDTH, width);
}
#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
@@ -355,7 +351,7 @@ bfin_debug_mmrs_ppi(struct dentry *parent, unsigned long base, int num)
__PPI(DELAY, delay);
__PPI(FRAME, frame);
}
-#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_STATUS, num)
+#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_CONTROL, num)
/*
* SPI
@@ -1288,15 +1284,15 @@ static int __init bfin_debug_mmrs_init(void)
D16(VR_CTL);
D32(CHIPID); /* it's part of this hardware block */
-#if defined(PPI_STATUS) || defined(PPI0_STATUS) || defined(PPI1_STATUS)
+#if defined(PPI_CONTROL) || defined(PPI0_CONTROL) || defined(PPI1_CONTROL)
parent = debugfs_create_dir("ppi", top);
-# ifdef PPI_STATUS
- bfin_debug_mmrs_ppi(parent, PPI_STATUS, -1);
+# ifdef PPI_CONTROL
+ bfin_debug_mmrs_ppi(parent, PPI_CONTROL, -1);
# endif
-# ifdef PPI0_STATUS
+# ifdef PPI0_CONTROL
PPI(0);
# endif
-# ifdef PPI1_STATUS
+# ifdef PPI1_CONTROL
PPI(1);
# endif
#endif
@@ -1341,6 +1337,10 @@ static int __init bfin_debug_mmrs_init(void)
D16(RSI_PID1);
D16(RSI_PID2);
D16(RSI_PID3);
+ D16(RSI_PID4);
+ D16(RSI_PID5);
+ D16(RSI_PID6);
+ D16(RSI_PID7);
D16(RSI_PWR_CONTROL);
D16(RSI_RD_WAIT_EN);
D32(RSI_RESPONSE0);
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index f6d924ac0c44..000000000000
--- a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2008-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF514.h b/arch/blackfin/mach-bf518/include/mach/defBF514.h
index 98a51c479290..cfab428e577c 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF514.h
@@ -36,13 +36,13 @@
#define RSI_EMASK 0xFFC038C4 /* RSI Exception Mask Register */
#define RSI_CONFIG 0xFFC038C8 /* RSI Configuration Register */
#define RSI_RD_WAIT_EN 0xFFC038CC /* RSI Read Wait Enable Register */
-#define RSI_PID0 0xFFC03FE0 /* RSI Peripheral ID Register 0 */
-#define RSI_PID1 0xFFC03FE4 /* RSI Peripheral ID Register 1 */
-#define RSI_PID2 0xFFC03FE8 /* RSI Peripheral ID Register 2 */
-#define RSI_PID3 0xFFC03FEC /* RSI Peripheral ID Register 3 */
-#define RSI_PID4 0xFFC03FF0 /* RSI Peripheral ID Register 4 */
-#define RSI_PID5 0xFFC03FF4 /* RSI Peripheral ID Register 5 */
-#define RSI_PID6 0xFFC03FF8 /* RSI Peripheral ID Register 6 */
-#define RSI_PID7 0xFFC03FFC /* RSI Peripheral ID Register 7 */
+#define RSI_PID0 0xFFC038D0 /* RSI Peripheral ID Register 0 */
+#define RSI_PID1 0xFFC038D4 /* RSI Peripheral ID Register 1 */
+#define RSI_PID2 0xFFC038D8 /* RSI Peripheral ID Register 2 */
+#define RSI_PID3 0xFFC038DC /* RSI Peripheral ID Register 3 */
+#define RSI_PID4 0xFFC038E0 /* RSI Peripheral ID Register 4 */
+#define RSI_PID5 0xFFC038E4 /* RSI Peripheral ID Register 5 */
+#define RSI_PID6 0xFFC038E8 /* RSI Peripheral ID Register 6 */
+#define RSI_PID7 0xFFC038EC /* RSI Peripheral ID Register 7 */
#endif /* _DEF_BF514_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 960e08919def..000000000000
--- a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2007-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index cc383adfdffa..aab80bb1a683 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -185,8 +185,8 @@
#define USB_EP_NI7_TXTYPE 0xffc03bd4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint7 */
#define USB_EP_NI7_TXINTERVAL 0xffc03bd8 /* Sets the NAK response timeout on Endpoint7 */
#define USB_EP_NI7_RXTYPE 0xffc03bdc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint7 */
-#define USB_EP_NI7_RXINTERVAL 0xffc03bf0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
-#define USB_EP_NI7_TXCOUNT 0xffc03bf8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
+#define USB_EP_NI7_RXINTERVAL 0xffc03be0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
+#define USB_EP_NI7_TXCOUNT 0xffc03be8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
#define USB_DMA_INTERRUPT 0xffc03c00 /* Indicates pending interrupts for the DMA channels */
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 45dcaa4f3e41..000000000000
--- a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#ifdef CONFIG_BFIN_UART0_CTSRTS
-# define CONFIG_SERIAL_BFIN_CTSRTS
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- }
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index e16dc4560048..76db1d483173 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -382,7 +382,6 @@ static struct platform_device net2272_bfin_device = {
#endif
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static struct mtd_partition bfin_plat_nand_partitions[] = {
@@ -396,7 +395,6 @@ static struct mtd_partition bfin_plat_nand_partitions[] = {
.offset = MTDPART_OFS_APPEND,
},
};
-#endif
#define BFIN_NAND_PLAT_CLE 2
#define BFIN_NAND_PLAT_ALE 1
@@ -423,11 +421,9 @@ static struct platform_nand_data bfin_plat_nand_data = {
.chip = {
.nr_chips = 1,
.chip_delay = 30,
-#ifdef CONFIG_MTD_PARTITIONS
.part_probe_types = part_probes,
.partitions = bfin_plat_nand_partitions,
.nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
-#endif
},
.ctrl = {
.cmd_ctrl = bfin_plat_nand_cmd_ctrl,
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 3e955dba8951..000000000000
--- a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index beb502e9cb33..000000000000
--- a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright 2008-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART2
- {
- 0xFFC02100,
- IRQ_UART2_RX,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART2_TX,
- CH_UART2_RX,
-#endif
-#ifdef CONFIG_BFIN_UART2_CTSRTS
- CONFIG_UART2_CTS_PIN,
- CONFIG_UART2_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 0d94edaaaa2e..000000000000
--- a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2007-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) || \
- defined(CONFIG_BFIN_UART2_CTSRTS) || defined(CONFIG_BFIN_UART3_CTSRTS)
-# define CONFIG_SERIAL_BFIN_HARD_CTSRTS
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- 0,
- 0,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- GPIO_PE10,
- GPIO_PE9,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART2
- {
- 0xFFC02100,
- IRQ_UART2_RX,
- IRQ_UART2_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART2_TX,
- CH_UART2_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- 0,
- 0,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART3
- {
- 0xFFC03100,
- IRQ_UART3_RX,
- IRQ_UART3_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART3_TX,
- CH_UART3_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- GPIO_PB3,
- GPIO_PB2,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index 1cbba115f96f..1fa41ec03f31 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -271,10 +271,10 @@
#define USB_EP_NI0_TXINTERVAL 0xffc03e18 /* Sets the NAK response timeout on Endpoint 0 */
#define USB_EP_NI0_RXTYPE 0xffc03e1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint0 */
#define USB_EP_NI0_RXINTERVAL 0xffc03e20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint0 */
+#define USB_EP_NI0_TXCOUNT 0xffc03e28 /* Number of bytes to be written to the endpoint0 Tx FIFO */
/* USB Endpoint 1 Control Registers */
-#define USB_EP_NI0_TXCOUNT 0xffc03e28 /* Number of bytes to be written to the endpoint0 Tx FIFO */
#define USB_EP_NI1_TXMAXP 0xffc03e40 /* Maximum packet size for Host Tx endpoint1 */
#define USB_EP_NI1_TXCSR 0xffc03e44 /* Control Status register for endpoint1 */
#define USB_EP_NI1_RXMAXP 0xffc03e48 /* Maximum packet size for Host Rx endpoint1 */
@@ -284,10 +284,10 @@
#define USB_EP_NI1_TXINTERVAL 0xffc03e58 /* Sets the NAK response timeout on Endpoint1 */
#define USB_EP_NI1_RXTYPE 0xffc03e5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint1 */
#define USB_EP_NI1_RXINTERVAL 0xffc03e60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint1 */
+#define USB_EP_NI1_TXCOUNT 0xffc03e68 /* Number of bytes to be written to the+H102 endpoint1 Tx FIFO */
/* USB Endpoint 2 Control Registers */
-#define USB_EP_NI1_TXCOUNT 0xffc03e68 /* Number of bytes to be written to the+H102 endpoint1 Tx FIFO */
#define USB_EP_NI2_TXMAXP 0xffc03e80 /* Maximum packet size for Host Tx endpoint2 */
#define USB_EP_NI2_TXCSR 0xffc03e84 /* Control Status register for endpoint2 */
#define USB_EP_NI2_RXMAXP 0xffc03e88 /* Maximum packet size for Host Rx endpoint2 */
@@ -297,10 +297,10 @@
#define USB_EP_NI2_TXINTERVAL 0xffc03e98 /* Sets the NAK response timeout on Endpoint2 */
#define USB_EP_NI2_RXTYPE 0xffc03e9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint2 */
#define USB_EP_NI2_RXINTERVAL 0xffc03ea0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint2 */
+#define USB_EP_NI2_TXCOUNT 0xffc03ea8 /* Number of bytes to be written to the endpoint2 Tx FIFO */
/* USB Endpoint 3 Control Registers */
-#define USB_EP_NI2_TXCOUNT 0xffc03ea8 /* Number of bytes to be written to the endpoint2 Tx FIFO */
#define USB_EP_NI3_TXMAXP 0xffc03ec0 /* Maximum packet size for Host Tx endpoint3 */
#define USB_EP_NI3_TXCSR 0xffc03ec4 /* Control Status register for endpoint3 */
#define USB_EP_NI3_RXMAXP 0xffc03ec8 /* Maximum packet size for Host Rx endpoint3 */
@@ -310,10 +310,10 @@
#define USB_EP_NI3_TXINTERVAL 0xffc03ed8 /* Sets the NAK response timeout on Endpoint3 */
#define USB_EP_NI3_RXTYPE 0xffc03edc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint3 */
#define USB_EP_NI3_RXINTERVAL 0xffc03ee0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint3 */
+#define USB_EP_NI3_TXCOUNT 0xffc03ee8 /* Number of bytes to be written to the H124endpoint3 Tx FIFO */
/* USB Endpoint 4 Control Registers */
-#define USB_EP_NI3_TXCOUNT 0xffc03ee8 /* Number of bytes to be written to the H124endpoint3 Tx FIFO */
#define USB_EP_NI4_TXMAXP 0xffc03f00 /* Maximum packet size for Host Tx endpoint4 */
#define USB_EP_NI4_TXCSR 0xffc03f04 /* Control Status register for endpoint4 */
#define USB_EP_NI4_RXMAXP 0xffc03f08 /* Maximum packet size for Host Rx endpoint4 */
@@ -323,10 +323,10 @@
#define USB_EP_NI4_TXINTERVAL 0xffc03f18 /* Sets the NAK response timeout on Endpoint4 */
#define USB_EP_NI4_RXTYPE 0xffc03f1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint4 */
#define USB_EP_NI4_RXINTERVAL 0xffc03f20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint4 */
+#define USB_EP_NI4_TXCOUNT 0xffc03f28 /* Number of bytes to be written to the endpoint4 Tx FIFO */
/* USB Endpoint 5 Control Registers */
-#define USB_EP_NI4_TXCOUNT 0xffc03f28 /* Number of bytes to be written to the endpoint4 Tx FIFO */
#define USB_EP_NI5_TXMAXP 0xffc03f40 /* Maximum packet size for Host Tx endpoint5 */
#define USB_EP_NI5_TXCSR 0xffc03f44 /* Control Status register for endpoint5 */
#define USB_EP_NI5_RXMAXP 0xffc03f48 /* Maximum packet size for Host Rx endpoint5 */
@@ -336,10 +336,10 @@
#define USB_EP_NI5_TXINTERVAL 0xffc03f58 /* Sets the NAK response timeout on Endpoint5 */
#define USB_EP_NI5_RXTYPE 0xffc03f5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint5 */
#define USB_EP_NI5_RXINTERVAL 0xffc03f60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint5 */
+#define USB_EP_NI5_TXCOUNT 0xffc03f68 /* Number of bytes to be written to the endpoint5 Tx FIFO */
/* USB Endpoint 6 Control Registers */
-#define USB_EP_NI5_TXCOUNT 0xffc03f68 /* Number of bytes to be written to the endpoint5 Tx FIFO */
#define USB_EP_NI6_TXMAXP 0xffc03f80 /* Maximum packet size for Host Tx endpoint6 */
#define USB_EP_NI6_TXCSR 0xffc03f84 /* Control Status register for endpoint6 */
#define USB_EP_NI6_RXMAXP 0xffc03f88 /* Maximum packet size for Host Rx endpoint6 */
@@ -349,10 +349,10 @@
#define USB_EP_NI6_TXINTERVAL 0xffc03f98 /* Sets the NAK response timeout on Endpoint6 */
#define USB_EP_NI6_RXTYPE 0xffc03f9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint6 */
#define USB_EP_NI6_RXINTERVAL 0xffc03fa0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint6 */
+#define USB_EP_NI6_TXCOUNT 0xffc03fa8 /* Number of bytes to be written to the endpoint6 Tx FIFO */
/* USB Endpoint 7 Control Registers */
-#define USB_EP_NI6_TXCOUNT 0xffc03fa8 /* Number of bytes to be written to the endpoint6 Tx FIFO */
#define USB_EP_NI7_TXMAXP 0xffc03fc0 /* Maximum packet size for Host Tx endpoint7 */
#define USB_EP_NI7_TXCSR 0xffc03fc4 /* Control Status register for endpoint7 */
#define USB_EP_NI7_RXMAXP 0xffc03fc8 /* Maximum packet size for Host Rx endpoint7 */
@@ -361,8 +361,9 @@
#define USB_EP_NI7_TXTYPE 0xffc03fd4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint7 */
#define USB_EP_NI7_TXINTERVAL 0xffc03fd8 /* Sets the NAK response timeout on Endpoint7 */
#define USB_EP_NI7_RXTYPE 0xffc03fdc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint7 */
-#define USB_EP_NI7_RXINTERVAL 0xffc03ff0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
-#define USB_EP_NI7_TXCOUNT 0xffc03ff8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
+#define USB_EP_NI7_RXINTERVAL 0xffc03fe0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
+#define USB_EP_NI7_TXCOUNT 0xffc03fe8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
+
#define USB_DMA_INTERRUPT 0xffc04000 /* Indicates pending interrupts for the DMA channels */
/* USB Channel 0 Config Registers */
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 3926cd909b66..9231a942892b 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -243,7 +243,6 @@ static struct platform_device bfin_uart0_device = {
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
static struct mtd_partition bfin_plat_nand_partitions[] = {
@@ -257,7 +256,6 @@ static struct mtd_partition bfin_plat_nand_partitions[] = {
.offset = MTDPART_OFS_APPEND,
},
};
-#endif
#define BFIN_NAND_PLAT_CLE 2
#define BFIN_NAND_PLAT_ALE 3
@@ -286,11 +284,9 @@ static struct platform_nand_data bfin_plat_nand_data = {
.chip = {
.nr_chips = 1,
.chip_delay = 30,
-#ifdef CONFIG_MTD_PARTITIONS
.part_probe_types = part_probes,
.partitions = bfin_plat_nand_partitions,
.nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
-#endif
},
.ctrl = {
.cmd_ctrl = bfin_plat_nand_cmd_ctrl,
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 3a6947456cf1..000000000000
--- a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#ifdef CONFIG_BFIN_UART0_CTSRTS
-# define CONFIG_SERIAL_BFIN_CTSRTS
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
- {
- 0xFFC00400,
- IRQ_UART_RX,
- IRQ_UART_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART_TX,
- CH_UART_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- }
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index f96933f48a7f..225d311c9701 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1753,6 +1753,8 @@ ENTRY(_sys_call_table)
.long _sys_open_by_handle_at
.long _sys_clock_adjtime
.long _sys_syncfs
+ .long _sys_setns
+ .long _sys_sendmmsg /* 380 */
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mm/maccess.c b/arch/blackfin/mm/maccess.c
index b71cebc1f8a3..e2532114c5fd 100644
--- a/arch/blackfin/mm/maccess.c
+++ b/arch/blackfin/mm/maccess.c
@@ -16,7 +16,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
return bfin_mem_access_type(addr, size);
}
-long probe_kernel_read(void *dst, void *src, size_t size)
+long probe_kernel_read(void *dst, const void *src, size_t size)
{
unsigned long lsrc = (unsigned long)src;
int mem_type;
@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
return -EFAULT;
}
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
unsigned long ldst = (unsigned long)dst;
int mem_type;
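
The two hunks above only constify the src argument; the calling convention is unchanged. As an illustration (not part of this patch), a minimal sketch of how kernel code typically uses probe_kernel_read() to inspect an address that may not be mapped; the caller name is hypothetical:

    #include <linux/kernel.h>
    #include <linux/uaccess.h>      /* probe_kernel_read() */

    static void demo_dump_word(const void *addr)
    {
            unsigned long val;

            /* Copies sizeof(val) bytes or returns -EFAULT; never oopses. */
            if (probe_kernel_read(&val, addr, sizeof(val)))
                    pr_info("%p: <unreadable>\n", addr);
            else
                    pr_info("%p: %08lx\n", addr, val);
    }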
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index b6b94a27d276..17addacb169e 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -270,7 +270,6 @@ config ETRAX_AXISFLASHMAP
select MTD_JEDECPROBE if ETRAX_ARCH_V32
select MTD_CHAR
select MTD_BLOCK
- select MTD_PARTITIONS
select MTD_COMPLEX_MAPPINGS
help
This option enables MTD mapping of flash devices. Needed to use
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index ed708e19d09e..a4bbdfd37bd8 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -372,7 +372,7 @@ static int __init init_axis_flash(void)
#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
if (mymtd) {
main_partition.size = mymtd->size;
- err = add_mtd_partitions(mymtd, &main_partition, 1);
+ err = mtd_device_register(mymtd, &main_partition, 1);
if (err)
panic("axisflashmap: Could not initialize "
"partition for whole main mtd device!\n");
@@ -382,10 +382,12 @@ static int __init init_axis_flash(void)
if (mymtd) {
if (use_default_ptable) {
printk(KERN_INFO " Using default partition table.\n");
- err = add_mtd_partitions(mymtd, axis_default_partitions,
- NUM_DEFAULT_PARTITIONS);
+ err = mtd_device_register(mymtd,
+ axis_default_partitions,
+ NUM_DEFAULT_PARTITIONS);
} else {
- err = add_mtd_partitions(mymtd, axis_partitions, pidx);
+ err = mtd_device_register(mymtd, axis_partitions,
+ pidx);
}
if (err)
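
The conversions in this file (and in the other MTD users below) replace the old add_mtd_partitions()/add_mtd_device() pair with the single mtd_device_register() entry point. A minimal sketch of the new-style call in a hypothetical map driver; the partition layout is illustrative only, not from this patch:

    #include <linux/kernel.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    static struct mtd_partition demo_parts[] = {
            {
                    .name   = "boot",
                    .offset = 0,
                    .size   = 0x40000,
            }, {
                    .name   = "rootfs",
                    .offset = MTDPART_OFS_APPEND,
                    .size   = MTDPART_SIZ_FULL,
            },
    };

    static int demo_register(struct mtd_info *mtd)
    {
            /*
             * One call registers the master device and its partitions;
             * passing (NULL, 0) registers the bare device, which is what
             * the removed add_mtd_device() fallbacks used to do.
             */
            return mtd_device_register(mtd, demo_parts, ARRAY_SIZE(demo_parts));
    }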
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 0d6420d087fd..1161883eb582 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -937,6 +937,7 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
+ .long sys_setns /* 335 */
/*
* NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 1633b120aa81..41a2732e8b9c 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -405,7 +405,6 @@ config ETRAX_AXISFLASHMAP
select MTD_JEDECPROBE
select MTD_CHAR
select MTD_BLOCK
- select MTD_PARTITIONS
select MTD_COMPLEX_MAPPINGS
help
This option enables MTD mapping of flash devices. Needed to use
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 7b155f8203b8..a2bde3744622 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -561,7 +561,7 @@ static int __init init_axis_flash(void)
#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
if (main_mtd) {
main_partition.size = main_mtd->size;
- err = add_mtd_partitions(main_mtd, &main_partition, 1);
+ err = mtd_device_register(main_mtd, &main_partition, 1);
if (err)
panic("axisflashmap: Could not initialize "
"partition for whole main mtd device!\n");
@@ -597,7 +597,8 @@ static int __init init_axis_flash(void)
mtd_ram->erasesize = (main_mtd ? main_mtd->erasesize :
CONFIG_ETRAX_PTABLE_SECTOR);
} else {
- err = add_mtd_partitions(main_mtd, &partition[part], 1);
+ err = mtd_device_register(main_mtd, &partition[part],
+ 1);
if (err)
panic("axisflashmap: Could not add mtd "
"partition %d\n", part);
@@ -633,7 +634,7 @@ static int __init init_axis_flash(void)
#ifndef CONFIG_ETRAX_VCS_SIM
if (aux_mtd) {
aux_partition.size = aux_mtd->size;
- err = add_mtd_partitions(aux_mtd, &aux_partition, 1);
+ err = mtd_device_register(aux_mtd, &aux_partition, 1);
if (err)
panic("axisflashmap: Could not initialize "
"aux mtd device!\n");
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 3abf12c23e5f..84fed7e91ada 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -880,6 +880,7 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
+ .long sys_setns /* 335 */
/*
* NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index f6fad83b3a8c..f921b8b0f97e 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -339,10 +339,11 @@
#define __NR_inotify_init1 332
#define __NR_preadv 333
#define __NR_pwritev 334
+#define __NR_setns 335
#ifdef __KERNEL__
-#define NR_syscalls 335
+#define NR_syscalls 336
#include <arch/unistd.h>
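
Most of the remaining architecture hunks in this series wire up the new setns() system call in the same way. A minimal userspace sketch of invoking it through syscall(2) before a libc wrapper exists; the /proc path is only an example and __NR_setns comes from the updated kernel headers:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            int fd;

            if (argc < 2)           /* e.g. ./a.out /proc/1234/ns/net */
                    return 1;
            fd = open(argv[1], O_RDONLY);
            /* nstype 0 accepts any namespace type behind the descriptor. */
            if (fd < 0 || syscall(__NR_setns, fd, 0) < 0) {
                    perror("setns");
                    return 1;
            }
            /* The process is now a member of the target namespace. */
            return 0;
    }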
diff --git a/arch/frv/include/asm/suspend.h b/arch/frv/include/asm/suspend.h
deleted file mode 100644
index 5fa7b5a6ee40..000000000000
--- a/arch/frv/include/asm/suspend.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* suspend.h: suspension stuff
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_SUSPEND_H
-#define _ASM_SUSPEND_H
-
-static inline int arch_prepare_suspend(void)
-{
- return 0;
-}
-
-#endif /* _ASM_SUSPEND_H */
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index b28da499e22a..a569dff7cd59 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -343,10 +343,11 @@
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
+#define __NR_setns 337
#ifdef __KERNEL__
-#define NR_syscalls 337
+#define NR_syscalls 338
#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 63d579bf1c29..017d6d7b784f 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1526,5 +1526,6 @@ sys_call_table:
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
+ .long sys_setns
syscall_table_size = (. - sys_call_table)
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
index 50f2c5a36591..2c3f8e60b1e0 100644
--- a/arch/h8300/include/asm/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
@@ -325,10 +325,11 @@
#define __NR_move_pages 317
#define __NR_getcpu 318
#define __NR_epoll_pwait 319
+#define __NR_setns 320
#ifdef __KERNEL__
-#define NR_syscalls 320
+#define NR_syscalls 321
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index faefaff7d43d..f4b2e67bcc34 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -333,6 +333,7 @@ SYMBOL_NAME_LABEL(sys_call_table)
.long SYMBOL_NAME(sys_ni_syscall) /* sys_move_pages */
.long SYMBOL_NAME(sys_getcpu)
.long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_pwait */
+ .long SYMBOL_NAME(sys_setns) /* 320 */
.macro call_sp addr
mov.l #SYMBOL_NAME(\addr),er6
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 404d037c5e10..1cf0f496f744 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -319,11 +319,12 @@
#define __NR_open_by_handle_at 1327
#define __NR_clock_adjtime 1328
#define __NR_syncfs 1329
+#define __NR_setns 1330
#ifdef __KERNEL__
-#define NR_syscalls 306 /* length of syscall table */
+#define NR_syscalls 307 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6de2e23b3636..9ca80193cd4e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1775,6 +1775,7 @@ sys_call_table:
data8 sys_open_by_handle_at
data8 sys_clock_adjtime
data8 sys_syncfs
+ data8 sys_setns // 1330
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index c70545689da8..3e1db561aacc 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -330,10 +330,11 @@
/* #define __NR_timerfd 322 removed */
#define __NR_eventfd 323
#define __NR_fallocate 324
+#define __NR_setns 325
#ifdef __KERNEL__
-#define NR_syscalls 325
+#define NR_syscalls 326
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index 60536e271233..528f2e6ad064 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -324,3 +324,4 @@ ENTRY(sys_call_table)
.long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
+ .long sys_setns /* 325 */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f3b649de2a1b..43f984e93970 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -349,10 +349,11 @@
#define __NR_open_by_handle_at 341
#define __NR_clock_adjtime 342
#define __NR_syncfs 343
+#define __NR_setns 344
#ifdef __KERNEL__
-#define NR_syscalls 344
+#define NR_syscalls 345
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 6f7b09122a00..00d1452f9571 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -364,4 +364,5 @@ ENTRY(sys_call_table)
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
+ .long sys_setns
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 30edd61a6b8f..7d7092b917ac 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -390,8 +390,9 @@
#define __NR_open_by_handle_at 372
#define __NR_clock_adjtime 373
#define __NR_syncfs 374
+#define __NR_setns 375
-#define __NR_syscalls 375
+#define __NR_syscalls 376
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 00ee90f08343..b15cc219b1d9 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -130,7 +130,7 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
memblock_init();
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 85cea81d1ca1..d915a122c865 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -379,3 +379,4 @@ ENTRY(sys_call_table)
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
+ .long sys_setns /* 375 */
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 008f657116eb..0ee02f5e51cc 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -16,7 +16,6 @@
static struct map_info flash_map;
static struct mtd_info *mymtd;
-#ifdef CONFIG_MTD_PARTITIONS
static int nr_parts;
static struct mtd_partition *parts;
static const char *part_probe_types[] = {
@@ -26,7 +25,6 @@ static const char *part_probe_types[] = {
#endif
NULL
};
-#endif
/**
* Module/ driver initialization.
@@ -63,17 +61,10 @@ static int __init flash_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mymtd,
part_probe_types,
&parts, 0);
- if (nr_parts > 0)
- add_mtd_partitions(mymtd, parts, nr_parts);
- else
- add_mtd_device(mymtd);
-#else
- add_mtd_device(mymtd);
-#endif
+ mtd_device_register(mymtd, parts, nr_parts);
} else {
pr_err("Failed to register MTD device for flash\n");
}
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index f29b862d9db3..857d9b7858ad 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -14,9 +14,6 @@
#ifdef CONFIG_OF
#include <asm/bootinfo.h>
-/* which is compatible with the flattened device tree (FDT) */
-#define cmd_line arcs_cmdline
-
extern int early_init_dt_scan_memory_arch(unsigned long node,
const char *uname, int depth, void *data);
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
index 294cdb66c5fc..3adac3b53d19 100644
--- a/arch/mips/include/asm/suspend.h
+++ b/arch/mips/include/asm/suspend.h
@@ -1,8 +1,6 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-static inline int arch_prepare_suspend(void) { return 0; }
-
/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index fa2e37ea2be1..6fcfc480e9d0 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -363,16 +363,17 @@
#define __NR_open_by_handle_at (__NR_Linux + 340)
#define __NR_clock_adjtime (__NR_Linux + 341)
#define __NR_syncfs (__NR_Linux + 342)
+#define __NR_setns (__NR_Linux + 343)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 342
+#define __NR_Linux_syscalls 343
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 342
+#define __NR_O32_Linux_syscalls 343
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -682,16 +683,17 @@
#define __NR_open_by_handle_at (__NR_Linux + 299)
#define __NR_clock_adjtime (__NR_Linux + 300)
#define __NR_syncfs (__NR_Linux + 301)
+#define __NR_setns (__NR_Linux + 302)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 301
+#define __NR_Linux_syscalls 302
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 301
+#define __NR_64_Linux_syscalls 302
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1006,16 +1008,17 @@
#define __NR_open_by_handle_at (__NR_Linux + 304)
#define __NR_clock_adjtime (__NR_Linux + 305)
#define __NR_syncfs (__NR_Linux + 306)
+#define __NR_setns (__NR_Linux + 307)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 306
+#define __NR_Linux_syscalls 307
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 306
+#define __NR_N32_Linux_syscalls 307
#ifdef __KERNEL__
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index a19811e98a41..5b7eade41fa3 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -83,7 +83,8 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline);
+
/* Scan memory nodes */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
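
The microblaze and MIPS hunks above (and the powerpc one below) now hand their command-line buffer to early_init_dt_scan_chosen() through the void *data argument of of_scan_flat_dt() instead of relying on a per-arch global. For reference, a minimal sketch of the same callback convention with a hypothetical scanner, not taken from this patch:

    #include <linux/of_fdt.h>
    #include <linux/string.h>

    /* Copies the root node's "model" property into the buffer passed as data. */
    static int __init demo_scan_model(unsigned long node, const char *uname,
                                      int depth, void *data)
    {
            unsigned long len;
            const char *model = of_get_flat_dt_prop(node, "model", &len);

            if (depth != 0 || !model)
                    return 0;               /* not the root node, keep scanning */
            strlcpy(data, model, 64);       /* assumes a 64-byte buffer */
            return 1;                       /* non-zero stops the scan */
    }

    /* caller:  of_scan_flat_dt(demo_scan_model, model_buf);  */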
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7a8e1dd7f6f2..99e656e425f3 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -589,6 +589,7 @@ einval: li v0, -ENOSYS
sys sys_open_by_handle_at 3 /* 4340 */
sys sys_clock_adjtime 2
sys sys_syncfs 1
+ sys sys_setns 2
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2d31c83224f9..fb0575f47f3d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -428,4 +428,5 @@ sys_call_table:
PTR sys_open_by_handle_at
PTR sys_clock_adjtime /* 5300 */
PTR sys_syncfs
+ PTR sys_setns
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 38a0503b9a4a..4de0c5534e73 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
PTR sys_open_by_handle_at
PTR compat_sys_clock_adjtime /* 6305 */
PTR sys_syncfs
+ PTR sys_setns
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 91ea5e4041dd..4a387de08bfa 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -546,4 +546,5 @@ sys_call_table:
PTR compat_sys_open_by_handle_at /* 4340 */
PTR compat_sys_clock_adjtime
PTR sys_syncfs
+ PTR sys_setns
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 812816c45662..ec38e00b2559 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -639,7 +639,6 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
.flags = IORESOURCE_MEM,
};
struct platform_device *pdev;
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition parts[2];
struct physmap_flash_data pdata_part;
@@ -658,7 +657,7 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
pdata_part.parts = parts;
pdata = &pdata_part;
}
-#endif
+
pdev = platform_device_alloc("physmap-flash", no);
if (!pdev ||
platform_device_add_resources(pdev, &res, 1) ||
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index 9d056f515929..9051f921cbc7 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -349,10 +349,11 @@
#define __NR_rt_tgsigqueueinfo 336
#define __NR_perf_event_open 337
#define __NR_recvmmsg 338
+#define __NR_setns 339
#ifdef __KERNEL__
-#define NR_syscalls 339
+#define NR_syscalls 340
/*
* specify the deprecated syscalls we want to support on this arch
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index fb93ad720b82..ae435e1d5669 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -759,6 +759,7 @@ ENTRY(sys_call_table)
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_recvmmsg
+ .long sys_setns
nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 9cbc2c3bf630..3392de3e7be0 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -820,8 +820,9 @@
#define __NR_name_to_handle_at (__NR_Linux + 325)
#define __NR_open_by_handle_at (__NR_Linux + 326)
#define __NR_syncfs (__NR_Linux + 327)
+#define __NR_setns (__NR_Linux + 328)
-#define __NR_Linux_syscalls (__NR_syncfs + 1)
+#define __NR_Linux_syscalls (__NR_setns + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index a5b02ce4d41e..34a4f5a2fffb 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -426,6 +426,7 @@
ENTRY_SAME(name_to_handle_at) /* 325 */
ENTRY_COMP(open_by_handle_at)
ENTRY_SAME(syncfs)
+ ENTRY_SAME(setns)
/* Nothing yet */
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 5c1bf3466749..8a0b5ece8f76 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -157,6 +157,8 @@ struct fsl_lbc_regs {
#define LBCR_EPAR_SHIFT 16
#define LBCR_BMT 0x0000FF00
#define LBCR_BMT_SHIFT 8
+#define LBCR_BMTPS 0x0000000F
+#define LBCR_BMTPS_SHIFT 0
#define LBCR_INIT 0x00040000
__be32 lcrr; /**< Clock Ratio Register */
#define LCRR_DBYP 0x80000000
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index 0018bf80cb25..d902abd33995 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -14,5 +14,10 @@
#define ASM_PPC_RIO_H
extern void platform_rio_init(void);
+#ifdef CONFIG_RAPIDIO
+extern int fsl_rio_mcheck_exception(struct pt_regs *);
+#else
+static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
+#endif
#endif /* ASM_PPC_RIO_H */
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h
deleted file mode 100644
index c6efc3466aa6..000000000000
--- a/arch/powerpc/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_POWERPC_SUSPEND_H
-#define __ASM_POWERPC_SUSPEND_H
-
-static inline int arch_prepare_suspend(void) { return 0; }
-
-#endif /* __ASM_POWERPC_SUSPEND_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 8489d372077f..f6736b7da463 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -353,3 +353,4 @@ COMPAT_SYS_SPU(open_by_handle_at)
COMPAT_SYS_SPU(clock_adjtime)
SYSCALL_SPU(syncfs)
COMPAT_SYS_SPU(sendmmsg)
+SYSCALL_SPU(setns)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d23c8193caa..b8b3f599362b 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -372,10 +372,11 @@
#define __NR_clock_adjtime 347
#define __NR_syncfs 348
#define __NR_sendmmsg 349
+#define __NR_setns 350
#ifdef __KERNEL__
-#define __NR_syscalls 350
+#define __NR_syscalls 351
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 48aeb55faae9..f2c906b1d8d3 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -694,7 +694,7 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen_ppc, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
memblock_init();
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 560c96119501..aa17b76dd427 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -10,7 +10,6 @@
*/
#include <linux/sched.h>
-#include <asm/suspend.h>
#include <asm/system.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b13306b0d925..0ff4ab98d50c 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -55,6 +55,7 @@
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
+#include <asm/rio.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -424,6 +425,12 @@ int machine_check_e500mc(struct pt_regs *regs)
unsigned long reason = mcsr;
int recoverable = 1;
+ if (reason & MCSR_BUS_RBERR) {
+ recoverable = fsl_rio_mcheck_exception(regs);
+ if (recoverable == 1)
+ goto silent_out;
+ }
+
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
@@ -499,6 +506,7 @@ int machine_check_e500mc(struct pt_regs *regs)
reason & MCSR_MEA ? "Effective" : "Physical", addr);
}
+silent_out:
mtspr(SPRN_MCSR, mcsr);
return mfspr(SPRN_MCSR) == 0 && recoverable;
}
@@ -507,6 +515,11 @@ int machine_check_e500(struct pt_regs *regs)
{
unsigned long reason = get_mc_reason(regs);
+ if (reason & MCSR_BUS_RBERR) {
+ if (fsl_rio_mcheck_exception(regs))
+ return 1;
+ }
+
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 4fcb5a4e60dd..0608b1657da4 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -184,7 +184,8 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
}
EXPORT_SYMBOL(fsl_upm_run_pattern);
-static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
+static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
+ struct device_node *node)
{
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
@@ -198,6 +199,10 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
/* Enable interrupts for any detected events */
out_be32(&lbc->lteir, LTEIR_ENABLE);
+ /* Set the monitor timeout value to the maximum for erratum A001 */
+ if (of_device_is_compatible(node, "fsl,elbc"))
+ clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
+
return 0;
}
@@ -304,7 +309,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
fsl_lbc_ctrl_dev->dev = &dev->dev;
- ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev);
+ ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node);
if (ret < 0)
goto err;
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 49798532b477..5b206a2fe17c 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -10,7 +10,7 @@
* - Added Port-Write message handling
* - Added Machine Check exception handling
*
- * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
+ * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
* Zhang Wei <wei.zhang@freescale.com>
*
* Copyright 2005 MontaVista Software, Inc.
@@ -47,15 +47,33 @@
#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
+#define IPWSR_CLEAR 0x98
+#define OMSR_CLEAR 0x1cb3
+#define IMSR_CLEAR 0x491
+#define IDSR_CLEAR 0x91
+#define ODSR_CLEAR 0x1c00
+#define LTLEECSR_ENABLE_ALL 0xFFC000FC
+#define ESCSR_CLEAR 0x07120204
+
+#define RIO_PORT1_EDCSR 0x0640
+#define RIO_PORT2_EDCSR 0x0680
+#define RIO_PORT1_IECSR 0x10130
+#define RIO_PORT2_IECSR 0x101B0
+#define RIO_IM0SR 0x13064
+#define RIO_IM1SR 0x13164
+#define RIO_OM0SR 0x13004
+#define RIO_OM1SR 0x13104
+
#define RIO_ATMU_REGS_OFFSET 0x10c00
#define RIO_P_MSG_REGS_OFFSET 0x11000
#define RIO_S_MSG_REGS_OFFSET 0x13000
#define RIO_GCCSR 0x13c
#define RIO_ESCSR 0x158
+#define RIO_PORT2_ESCSR 0x178
#define RIO_CCSR 0x15c
#define RIO_LTLEDCSR 0x0608
-#define RIO_LTLEDCSR_IER 0x80000000
-#define RIO_LTLEDCSR_PRT 0x01000000
+#define RIO_LTLEDCSR_IER 0x80000000
+#define RIO_LTLEDCSR_PRT 0x01000000
#define RIO_LTLEECSR 0x060c
#define RIO_EPWISR 0x10010
#define RIO_ISR_AACR 0x10120
@@ -88,7 +106,10 @@
#define RIO_IPWSR_PWD 0x00000008
#define RIO_IPWSR_PWB 0x00000004
-#define RIO_EPWISR_PINT 0x80000000
+/* EPWISR Error match value */
+#define RIO_EPWISR_PINT1 0x80000000
+#define RIO_EPWISR_PINT2 0x40000000
+#define RIO_EPWISR_MU 0x00000002
#define RIO_EPWISR_PW 0x00000001
#define RIO_MSG_DESC_SIZE 32
@@ -260,9 +281,7 @@ struct rio_priv {
static void __iomem *rio_regs_win;
#ifdef CONFIG_E500
-static int (*saved_mcheck_exception)(struct pt_regs *regs);
-
-static int fsl_rio_mcheck_exception(struct pt_regs *regs)
+int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
const struct exception_table_entry *entry = NULL;
unsigned long reason = mfspr(SPRN_MCSR);
@@ -284,11 +303,9 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
}
}
- if (saved_mcheck_exception)
- return saved_mcheck_exception(regs);
- else
- return cur_cpu_spec->machine_check(regs);
+ return 0;
}
+EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
#endif
/**
@@ -1064,6 +1081,40 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
return rc;
}
+static void port_error_handler(struct rio_mport *port, int offset)
+{
+ /*XXX: Error recovery is not implemented, we just clear errors */
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
+
+ if (offset == 0) {
+ out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
+ out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0);
+ out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
+ } else {
+ out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
+ out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0);
+ out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
+ }
+}
+
+static void msg_unit_error_handler(struct rio_mport *port)
+{
+ struct rio_priv *priv = port->priv;
+
+ /*XXX: Error recovery is not implemented, we just clear errors */
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
+
+ out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
+ out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
+ out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
+ out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
+
+ out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
+ out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
+
+ out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
+}
+
/**
* fsl_rio_port_write_handler - MPC85xx port write interrupt handler
* @irq: Linux interrupt number
@@ -1144,10 +1195,22 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
}
pw_done:
- if (epwisr & RIO_EPWISR_PINT) {
+ if (epwisr & RIO_EPWISR_PINT1) {
+ tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ port_error_handler(port, 0);
+ }
+
+ if (epwisr & RIO_EPWISR_PINT2) {
tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
- out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+ port_error_handler(port, 1);
+ }
+
+ if (epwisr & RIO_EPWISR_MU) {
+ tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ msg_unit_error_handler(port);
}
return IRQ_HANDLED;
@@ -1258,12 +1321,14 @@ static int fsl_rio_port_write_init(struct rio_mport *mport)
/* Hook up port-write handler */
- rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
- "port-write", (void *)mport);
+ rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
+ IRQF_SHARED, "port-write", (void *)mport);
if (rc < 0) {
pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
goto err_out;
}
+ /* Enable Error Interrupt */
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
INIT_WORK(&priv->pw_work, fsl_pw_dpc);
spin_lock_init(&priv->pw_fifo_lock);
@@ -1538,11 +1603,6 @@ int fsl_rio_setup(struct platform_device *dev)
fsl_rio_doorbell_init(port);
fsl_rio_port_write_init(port);
-#ifdef CONFIG_E500
- saved_mcheck_exception = ppc_md.machine_check_exception;
- ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
-#endif
-
return 0;
err:
iounmap(priv->regs_win);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff2d2371b2e9..9fab2aa9c2c8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -2,7 +2,7 @@ config MMU
def_bool y
config ZONE_DMA
- def_bool y if 64BIT
+ def_bool y
config LOCKDEP_SUPPORT
def_bool y
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index e43fe7537031..f7d3dc555bdb 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -92,9 +92,7 @@ static void appldata_get_mem_data(void *data)
mem_data->pswpin = ev[PSWPIN];
mem_data->pswpout = ev[PSWPOUT];
mem_data->pgalloc = ev[PGALLOC_NORMAL];
-#ifdef CONFIG_ZONE_DMA
mem_data->pgalloc += ev[PGALLOC_DMA];
-#endif
mem_data->pgfault = ev[PGFAULT];
mem_data->pgmajfault = ev[PGMAJFAULT];
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h
index 8a096b83f51f..0e3b35f96be1 100644
--- a/arch/s390/include/asm/delay.h
+++ b/arch/s390/include/asm/delay.h
@@ -14,10 +14,12 @@
#ifndef _S390_DELAY_H
#define _S390_DELAY_H
-extern void __udelay(unsigned long long usecs);
-extern void udelay_simple(unsigned long long usecs);
-extern void __delay(unsigned long loops);
+void __ndelay(unsigned long long nsecs);
+void __udelay(unsigned long long usecs);
+void udelay_simple(unsigned long long usecs);
+void __delay(unsigned long loops);
+#define ndelay(n) __ndelay((unsigned long long) (n))
#define udelay(n) __udelay((unsigned long long) (n))
#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 1544b90bd6d6..ba7b01c726a3 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -2,6 +2,7 @@
#define _ASM_IRQ_H
#include <linux/hardirq.h>
+#include <linux/types.h>
enum interruption_class {
EXTERNAL_INTERRUPT,
@@ -31,4 +32,11 @@ enum interruption_class {
NR_IRQS,
};
+typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
+
+int register_external_interrupt(u16 code, ext_int_handler_t handler);
+int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
+void service_subclass_irq_register(void);
+void service_subclass_irq_unregister(void);
+
#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
deleted file mode 100644
index 080876d5f196..000000000000
--- a/arch/s390/include/asm/s390_ext.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright IBM Corp. 1999,2010
- * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- * Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#ifndef _S390_EXTINT_H
-#define _S390_EXTINT_H
-
-#include <linux/types.h>
-
-typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
-
-int register_external_interrupt(__u16 code, ext_int_handler_t handler);
-int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
-
-#endif /* _S390_EXTINT_H */
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
deleted file mode 100644
index dc75c616eafe..000000000000
--- a/arch/s390/include/asm/suspend.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_S390_SUSPEND_H
-#define __ASM_S390_SUSPEND_H
-
-static inline int arch_prepare_suspend(void)
-{
- return 0;
-}
-
-#endif
-
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c5338834ddbd..005d77d8ae2a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -7,7 +7,7 @@
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
-static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_core_map[cpu];
}
@@ -21,7 +21,7 @@ static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
extern unsigned char cpu_book_id[NR_CPUS];
extern cpumask_t cpu_book_map[NR_CPUS];
-static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+static inline const struct cpumask *cpu_book_mask(int cpu)
{
return &cpu_book_map[cpu];
}
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 2d9ea11f919a..2b23885e81e9 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -49,12 +49,13 @@
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
+#define __access_ok(addr, size) \
+({ \
+ __chk_user_ptr(addr); \
+ 1; \
+})
-static inline int __access_ok(const void __user *addr, unsigned long size)
-{
- return 1;
-}
-#define access_ok(type,addr,size) __access_ok(addr,size)
+#define access_ok(type, addr, size) __access_ok(addr, size)
/*
* The exception table consists of pairs of addresses: the first is the
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 9208e69245a0..404bdb9671b4 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -276,7 +276,8 @@
#define __NR_open_by_handle_at 336
#define __NR_clock_adjtime 337
#define __NR_syncfs 338
-#define NR_syscalls 339
+#define __NR_setns 339
+#define NR_syscalls 340
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 5ff15dacb571..df3732249baa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -20,10 +20,10 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \
- processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
- s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
- vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o
+obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
+ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
+ debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
+ sysinfo.o jump_label.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 1dc96ea08fa8..1f5eb789c3a7 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1904,3 +1904,9 @@ compat_sys_clock_adjtime_wrapper:
sys_syncfs_wrapper:
lgfr %r2,%r2 # int
jg sys_syncfs
+
+ .globl sys_setns_wrapper
+sys_setns_wrapper:
+ lgfr %r2,%r2 # int
+ lgfr %r3,%r3 # int
+ jg sys_setns
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3d4a78fc1adc..1ca3d1d6a86c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -30,9 +30,9 @@
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
-#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
+#include <asm/irq.h>
#ifndef CONFIG_64BIT
#define ONELONG "%08lx: "
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e204f9597aaf..e3264f6a9720 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,19 +1,28 @@
/*
- * Copyright IBM Corp. 2004,2010
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Thomas Spatzier (tspat@de.ibm.com)
+ * Copyright IBM Corp. 2004,2011
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ * Thomas Spatzier <tspat@de.ibm.com>,
*
* This file contains interrupt related functions.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
-#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/irq_regs.h>
+#include <asm/cputime.h>
+#include <asm/lowcore.h>
+#include <asm/irq.h>
+#include "entry.h"
struct irq_class {
char *name;
@@ -82,8 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
 * For compatibility only. S/390 specific setup of interrupts et al. is done
* much later in init_channel_subsystem().
*/
-void __init
-init_IRQ(void)
+void __init init_IRQ(void)
{
/* nothing... */
}
@@ -134,3 +142,116 @@ void init_irq_proc(void)
create_prof_cpu_mask(root_irq_dir);
}
#endif
+
+/*
+ * ext_int_hash[index] is the start of the list for all external interrupts
+ * that hash to this index. With the current set of external interrupts
+ * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
+ * iucv and 0x2603 pfault) this is always the first element.
+ */
+
+struct ext_int_info {
+ struct ext_int_info *next;
+ ext_int_handler_t handler;
+ u16 code;
+};
+
+static struct ext_int_info *ext_int_hash[256];
+
+static inline int ext_hash(u16 code)
+{
+ return (code + (code >> 9)) & 0xff;
+}
+
+int register_external_interrupt(u16 code, ext_int_handler_t handler)
+{
+ struct ext_int_info *p;
+ int index;
+
+ p = kmalloc(sizeof(*p), GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+ p->code = code;
+ p->handler = handler;
+ index = ext_hash(code);
+ p->next = ext_int_hash[index];
+ ext_int_hash[index] = p;
+ return 0;
+}
+EXPORT_SYMBOL(register_external_interrupt);
+
+int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
+{
+ struct ext_int_info *p, *q;
+ int index;
+
+ index = ext_hash(code);
+ q = NULL;
+ p = ext_int_hash[index];
+ while (p) {
+ if (p->code == code && p->handler == handler)
+ break;
+ q = p;
+ p = p->next;
+ }
+ if (!p)
+ return -ENOENT;
+ if (q)
+ q->next = p->next;
+ else
+ ext_int_hash[index] = p->next;
+ kfree(p);
+ return 0;
+}
+EXPORT_SYMBOL(unregister_external_interrupt);
+
+void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
+{
+ struct pt_regs *old_regs;
+ unsigned short code;
+ struct ext_int_info *p;
+ int index;
+
+ code = (unsigned short) ext_int_code;
+ old_regs = set_irq_regs(regs);
+ s390_idle_check(regs, S390_lowcore.int_clock,
+ S390_lowcore.async_enter_timer);
+ irq_enter();
+ if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+ /* Serve timer interrupts first. */
+ clock_comparator_work();
+ kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+ if (code != 0x1004)
+ __get_cpu_var(s390_idle).nohz_delay = 1;
+ index = ext_hash(code);
+ for (p = ext_int_hash[index]; p; p = p->next) {
+ if (likely(p->code == code))
+ p->handler(ext_int_code, param32, param64);
+ }
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+static DEFINE_SPINLOCK(sc_irq_lock);
+static int sc_irq_refcount;
+
+void service_subclass_irq_register(void)
+{
+ spin_lock(&sc_irq_lock);
+ if (!sc_irq_refcount)
+ ctl_set_bit(0, 9);
+ sc_irq_refcount++;
+ spin_unlock(&sc_irq_lock);
+}
+EXPORT_SYMBOL(service_subclass_irq_register);
+
+void service_subclass_irq_unregister(void)
+{
+ spin_lock(&sc_irq_lock);
+ sc_irq_refcount--;
+ if (!sc_irq_refcount)
+ ctl_clear_bit(0, 9);
+ spin_unlock(&sc_irq_lock);
+}
+EXPORT_SYMBOL(service_subclass_irq_unregister);
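
The external-interrupt code folded into irq.c above keeps its existing interface, so consumers are unchanged. A minimal sketch of a consumer, with a hypothetical interruption code and handler (the real users are the cpu timer, hwc console, iucv and pfault code listed in the hash comment):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <asm/irq.h>

    static void demo_ext_handler(unsigned int ext_int_code,
                                 unsigned int param32, unsigned long param64)
    {
            /* Called from do_extint() for every interrupt matching our code. */
    }

    static int __init demo_ext_init(void)
    {
            int rc;

            rc = register_external_interrupt(0x1234, demo_ext_handler);
            if (rc)
                    return rc;
            /*
             * Sources delivered via the service-signal subclass additionally
             * enable control register 0 bit 9, as pfault does further down.
             */
            service_subclass_irq_register();
            return 0;
    }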
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
deleted file mode 100644
index 185029919c4d..000000000000
--- a/arch/s390/kernel/s390_ext.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright IBM Corp. 1999,2010
- * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- * Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ftrace.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <asm/s390_ext.h>
-#include <asm/irq_regs.h>
-#include <asm/cputime.h>
-#include <asm/lowcore.h>
-#include <asm/irq.h>
-#include "entry.h"
-
-struct ext_int_info {
- struct ext_int_info *next;
- ext_int_handler_t handler;
- __u16 code;
-};
-
-/*
- * ext_int_hash[index] is the start of the list for all external interrupts
- * that hash to this index. With the current set of external interrupts
- * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
- * iucv and 0x2603 pfault) this is always the first element.
- */
-static struct ext_int_info *ext_int_hash[256];
-
-static inline int ext_hash(__u16 code)
-{
- return (code + (code >> 9)) & 0xff;
-}
-
-int register_external_interrupt(__u16 code, ext_int_handler_t handler)
-{
- struct ext_int_info *p;
- int index;
-
- p = kmalloc(sizeof(*p), GFP_ATOMIC);
- if (!p)
- return -ENOMEM;
- p->code = code;
- p->handler = handler;
- index = ext_hash(code);
- p->next = ext_int_hash[index];
- ext_int_hash[index] = p;
- return 0;
-}
-EXPORT_SYMBOL(register_external_interrupt);
-
-int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
-{
- struct ext_int_info *p, *q;
- int index;
-
- index = ext_hash(code);
- q = NULL;
- p = ext_int_hash[index];
- while (p) {
- if (p->code == code && p->handler == handler)
- break;
- q = p;
- p = p->next;
- }
- if (!p)
- return -ENOENT;
- if (q)
- q->next = p->next;
- else
- ext_int_hash[index] = p->next;
- kfree(p);
- return 0;
-}
-EXPORT_SYMBOL(unregister_external_interrupt);
-
-void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
- unsigned int param32, unsigned long param64)
-{
- struct pt_regs *old_regs;
- unsigned short code;
- struct ext_int_info *p;
- int index;
-
- code = (unsigned short) ext_int_code;
- old_regs = set_irq_regs(regs);
- s390_idle_check(regs, S390_lowcore.int_clock,
- S390_lowcore.async_enter_timer);
- irq_enter();
- if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
- /* Serve timer interrupts first. */
- clock_comparator_work();
- kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
- if (code != 0x1004)
- __get_cpu_var(s390_idle).nohz_delay = 1;
- index = ext_hash(code);
- for (p = ext_int_hash[index]; p; p = p->next) {
- if (likely(p->code == code))
- p->handler(ext_int_code, param32, param64);
- }
- irq_exit();
- set_irq_regs(old_regs);
-}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f8e85ecbc459..52420d2785b3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,7 +44,6 @@
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
-#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9c65fd4ddce0..6ee39ef8fe4a 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -347,3 +347,4 @@ SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrappe
SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper)
SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
+SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a59557f1fb5f..dff933065ab6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -41,7 +41,6 @@
#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
-#include <asm/s390_ext.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 2eafb8c7a746..0cd340b72632 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -17,7 +17,6 @@
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
-#include <asm/s390_ext.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index b5a4a739b477..a65d2e82f61d 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -39,7 +39,6 @@
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
-#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 5e8ead4b4aba..2d6228f60cd6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -22,10 +22,10 @@
#include <linux/cpu.h>
#include <linux/kprobes.h>
-#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
+#include <asm/irq.h>
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 0f53110e1d09..a65229d91c92 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
+#include <asm/div64.h>
void __delay(unsigned long loops)
{
@@ -116,3 +117,17 @@ void udelay_simple(unsigned long long usecs)
while (get_clock() < end)
cpu_relax();
}
+
+void __ndelay(unsigned long long nsecs)
+{
+ u64 end;
+
+ nsecs <<= 9;
+ do_div(nsecs, 125);
+ end = get_clock() + nsecs;
+ if (nsecs & ~0xfffUL)
+ __udelay(nsecs >> 12);
+ while (get_clock() < end)
+ barrier();
+}
+EXPORT_SYMBOL(__ndelay);
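
A note on the conversion in __ndelay() above: the s390 TOD clock advances 4096 units per microsecond, so nanoseconds convert to clock units as

    ns * 4096 / 1000  =  ns * 512 / 125  =  (ns << 9) / 125

which is exactly the shift-then-do_div() sequence in the new function; for example 1000 ns << 9 = 512000, and 512000 / 125 = 4096 units, i.e. one microsecond. The nsecs >> 12 handed to __udelay() is the same value scaled back down to whole microseconds.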
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a0f9e730f26a..fe103e891e7a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,7 +34,7 @@
#include <asm/asm-offsets.h>
#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/compat.h>
#include "../kernel/entry.h"
@@ -245,9 +245,12 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
do_no_context(regs, int_code, trans_exc_code);
break;
default: /* fault & VM_FAULT_ERROR */
- if (fault & VM_FAULT_OOM)
- pagefault_out_of_memory();
- else if (fault & VM_FAULT_SIGBUS) {
+ if (fault & VM_FAULT_OOM) {
+ if (!(regs->psw.mask & PSW_MASK_PSTATE))
+ do_no_context(regs, int_code, trans_exc_code);
+ else
+ pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
do_no_context(regs, int_code, trans_exc_code);
@@ -277,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access,
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
- int fault, write;
+ unsigned int flags;
+ int fault;
if (notify_page_fault(regs))
return 0;
@@ -296,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ flags = FAULT_FLAG_ALLOW_RETRY;
+ if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+ flags |= FAULT_FLAG_WRITE;
+retry:
down_read(&mm->mmap_sem);
fault = VM_FAULT_BADMAP;
@@ -325,21 +333,31 @@ static inline int do_exception(struct pt_regs *regs, int access,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- write = (access == VM_WRITE ||
- (trans_exc_code & store_indication) == 0x400) ?
- FAULT_FLAG_WRITE : 0;
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
- regs, address);
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
}
/*
* The instruction that caused the program check will
@@ -429,10 +447,9 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
access = write ? VM_WRITE : VM_READ;
fault = do_exception(&regs, access, uaddr | 2);
if (unlikely(fault)) {
- if (fault & VM_FAULT_OOM) {
- pagefault_out_of_memory();
- fault = 0;
- } else if (fault & VM_FAULT_SIGBUS)
+ if (fault & VM_FAULT_OOM)
+ return -EFAULT;
+ else if (fault & VM_FAULT_SIGBUS)
do_sigbus(&regs, pgm_int_code, uaddr);
}
return fault ? -EFAULT : 0;
@@ -485,7 +502,6 @@ int pfault_init(void)
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
- __ctl_set_bit(0, 9);
return rc;
}
@@ -500,7 +516,6 @@ void pfault_fini(void)
if (!MACHINE_IS_VM || pfault_disable)
return;
- __ctl_clear_bit(0,9);
asm volatile(
" diag %0,0,0x258\n"
"0:\n"
@@ -615,6 +630,7 @@ static int __init pfault_irq_init(void)
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
if (rc)
goto out_pfault;
+ service_subclass_irq_register();
hotcpu_notifier(pfault_cpu_notify, 0);
return 0;
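The do_exception() changes above switch s390 to the FAULT_FLAG_ALLOW_RETRY protocol: major/minor accounting is done only on the first attempt, and when handle_mm_fault() asks for a retry the flag is cleared before jumping back, so at most one retry can happen. A distilled, self-contained illustration of that single-retry shape (ordinary C, with a hypothetical fake_fault() standing in for handle_mm_fault()):

#include <stdio.h>

#define FLAG_ALLOW_RETRY 0x1
#define RESULT_RETRY 0x2

/* Stand-in for handle_mm_fault(): asks for one retry, then succeeds. */
static int fake_fault(unsigned int flags)
{
	static int first = 1;

	if (first && (flags & FLAG_ALLOW_RETRY)) {
		first = 0;
		return RESULT_RETRY;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	int fault;

retry:
	fault = fake_fault(flags);
	if (fault & RESULT_RETRY) {
		flags &= ~FLAG_ALLOW_RETRY; /* clear it: at most one retry */
		goto retry;
	}
	printf("fault handled, flags=%#x\n", flags);
	return 0;
}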
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index dfefc2171691..59b663109d90 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -119,9 +119,7 @@ void __init paging_init(void)
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
fault_init();
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 71a4b0d34be0..51e5cd9b906a 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -19,7 +19,7 @@
* using the stura instruction.
* Returns the number of bytes copied or -EFAULT.
*/
-static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
unsigned long count, aligned;
int offset, mask;
@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
return rc ? rc : count;
}
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
long copied = 0;
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 053caa0fd276..4552ce40c81a 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -19,7 +19,7 @@
#include <linux/oprofile.h>
#include <asm/lowcore.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include "hwsampler.h"
@@ -580,7 +580,7 @@ static int hws_cpu_callback(struct notifier_block *nfb,
{
/* We do not have sampler space available for all possible CPUs.
All CPUs should be online when hw sampling is activated. */
- return NOTIFY_BAD;
+ return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
}
static struct notifier_block hws_cpu_notifier = {
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index 64eb41a063e8..e14567a7e9a1 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -3,7 +3,6 @@
#ifndef __ASSEMBLY__
#include <linux/notifier.h>
-static inline int arch_prepare_suspend(void) { return 0; }
#include <asm/ptrace.h>
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index bb7d2702c2c9..3432008d2888 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -374,8 +374,9 @@
#define __NR_clock_adjtime 361
#define __NR_syncfs 362
#define __NR_sendmmsg 363
+#define __NR_setns 364
-#define NR_syscalls 364
+#define NR_syscalls 365
#ifdef __KERNEL__
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 46327cea1e5c..ec9898665f23 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -395,10 +395,11 @@
#define __NR_clock_adjtime 372
#define __NR_syncfs 373
#define __NR_sendmmsg 374
+#define __NR_setns 375
#ifdef __KERNEL__
-#define NR_syscalls 375
+#define NR_syscalls 376
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 7c486f3e3a3c..39b051de4c7c 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -381,3 +381,4 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
+ .long sys_setns
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index ba1a737afe80..089c4d825d08 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -401,3 +401,4 @@ sys_call_table:
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
+ .long sys_setns /* 375 */
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index c5387ed0add8..6260d5deeabc 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -405,8 +405,9 @@
#define __NR_clock_adjtime 334
#define __NR_syncfs 335
#define __NR_sendmmsg 336
+#define __NR_setns 337
-#define NR_syscalls 337
+#define NR_syscalls 338
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 332c83ff7701..6e492d59f6b1 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -84,4 +84,4 @@ sys_call_table:
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
-/*335*/ .long sys_syncfs, sys_sendmmsg
+/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 43887ca0be0e..f566518483b5 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -85,7 +85,7 @@ sys_call_table32:
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
- .word sys_syncfs, compat_sys_sendmmsg
+ .word sys_syncfs, compat_sys_sendmmsg, sys_setns
#endif /* CONFIG_COMPAT */
@@ -162,4 +162,4 @@ sys_call_table:
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
- .word sys_syncfs, sys_sendmmsg
+ .word sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/unicore32/include/asm/suspend.h b/arch/unicore32/include/asm/suspend.h
index 88a9c0f32b21..65bad75c7e96 100644
--- a/arch/unicore32/include/asm/suspend.h
+++ b/arch/unicore32/include/asm/suspend.h
@@ -14,7 +14,6 @@
#define __UNICORE_SUSPEND_H__
#ifndef __ASSEMBLY__
-static inline int arch_prepare_suspend(void) { return 0; }
#include <asm/ptrace.h>
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 95f5826be458..c1870dddd322 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -849,4 +849,5 @@ ia32_sys_call_table:
.quad compat_sys_clock_adjtime
.quad sys_syncfs
.quad compat_sys_sendmmsg /* 345 */
+ .quad sys_setns
ia32_syscall_end:
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 5dc6acc98dbd..71cc3800712c 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -125,7 +125,7 @@
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRND (4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 617bd56b3070..7b439d9aea2a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -4,30 +4,33 @@
#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
+
#include <linux/smp.h>
-static inline void fill_ldt(struct desc_struct *desc,
- const struct user_desc *info)
-{
- desc->limit0 = info->limit & 0x0ffff;
- desc->base0 = info->base_addr & 0x0000ffff;
-
- desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
- desc->type = (info->read_exec_only ^ 1) << 1;
- desc->type |= info->contents << 2;
- desc->s = 1;
- desc->dpl = 0x3;
- desc->p = info->seg_not_present ^ 1;
- desc->limit = (info->limit & 0xf0000) >> 16;
- desc->avl = info->useable;
- desc->d = info->seg_32bit;
- desc->g = info->limit_in_pages;
- desc->base2 = (info->base_addr & 0xff000000) >> 24;
+static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
+{
+ desc->limit0 = info->limit & 0x0ffff;
+
+ desc->base0 = (info->base_addr & 0x0000ffff);
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
+
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+ desc->limit = (info->limit & 0xf0000) >> 16;
+ desc->avl = info->useable;
+ desc->d = info->seg_32bit;
+ desc->g = info->limit_in_pages;
+
+ desc->base2 = (info->base_addr & 0xff000000) >> 24;
/*
* Don't allow setting of the lm bit. It is useless anyway
* because 64bit system calls require __USER_CS:
*/
- desc->l = 0;
+ desc->l = 0;
}
extern struct desc_ptr idt_descr;
@@ -36,6 +39,7 @@ extern gate_desc idt_table[];
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
+
DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
@@ -48,16 +52,16 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
unsigned dpl, unsigned ist, unsigned seg)
{
- gate->offset_low = PTR_LOW(func);
- gate->segment = __KERNEL_CS;
- gate->ist = ist;
- gate->p = 1;
- gate->dpl = dpl;
- gate->zero0 = 0;
- gate->zero1 = 0;
- gate->type = type;
- gate->offset_middle = PTR_MIDDLE(func);
- gate->offset_high = PTR_HIGH(func);
+ gate->offset_low = PTR_LOW(func);
+ gate->segment = __KERNEL_CS;
+ gate->ist = ist;
+ gate->p = 1;
+ gate->dpl = dpl;
+ gate->zero0 = 0;
+ gate->zero1 = 0;
+ gate->type = type;
+ gate->offset_middle = PTR_MIDDLE(func);
+ gate->offset_high = PTR_HIGH(func);
}
#else
@@ -66,8 +70,7 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
unsigned short seg)
{
gate->a = (seg << 16) | (base & 0xffff);
- gate->b = (base & 0xffff0000) |
- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
+ gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}
#endif
@@ -75,31 +78,29 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
static inline int desc_empty(const void *ptr)
{
const u32 *desc = ptr;
+
return !(desc[0] | desc[1]);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define load_TR_desc() native_load_tr_desc()
-#define load_gdt(dtr) native_load_gdt(dtr)
-#define load_idt(dtr) native_load_idt(dtr)
-#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
-#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
-
-#define store_gdt(dtr) native_store_gdt(dtr)
-#define store_idt(dtr) native_store_idt(dtr)
-#define store_tr(tr) (tr = native_store_tr())
-
-#define load_TLS(t, cpu) native_load_tls(t, cpu)
-#define set_ldt native_set_ldt
-
-#define write_ldt_entry(dt, entry, desc) \
- native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
- native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) \
- native_write_idt_entry(dt, entry, g)
+#define load_TR_desc() native_load_tr_desc()
+#define load_gdt(dtr) native_load_gdt(dtr)
+#define load_idt(dtr) native_load_idt(dtr)
+#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
+
+#define store_gdt(dtr) native_store_gdt(dtr)
+#define store_idt(dtr) native_store_idt(dtr)
+#define store_tr(tr) (tr = native_store_tr())
+
+#define load_TLS(t, cpu) native_load_tls(t, cpu)
+#define set_ldt native_set_ldt
+
+#define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
@@ -112,33 +113,27 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
-static inline void native_write_idt_entry(gate_desc *idt, int entry,
- const gate_desc *gate)
+static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
memcpy(&idt[entry], gate, sizeof(*gate));
}
-static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
- const void *desc)
+static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
memcpy(&ldt[entry], desc, 8);
}
-static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
- const void *desc, int type)
+static inline void
+native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
unsigned int size;
+
switch (type) {
- case DESC_TSS:
- size = sizeof(tss_desc);
- break;
- case DESC_LDT:
- size = sizeof(ldt_desc);
- break;
- default:
- size = sizeof(struct desc_struct);
- break;
+ case DESC_TSS: size = sizeof(tss_desc); break;
+ case DESC_LDT: size = sizeof(ldt_desc); break;
+ default: size = sizeof(*gdt); break;
}
+
memcpy(&gdt[entry], desc, size);
}
@@ -154,20 +149,21 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
}
-static inline void set_tssldt_descriptor(void *d, unsigned long addr,
- unsigned type, unsigned size)
+static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
struct ldttss_desc64 *desc = d;
+
memset(desc, 0, sizeof(*desc));
- desc->limit0 = size & 0xFFFF;
- desc->base0 = PTR_LOW(addr);
- desc->base1 = PTR_MIDDLE(addr) & 0xFF;
- desc->type = type;
- desc->p = 1;
- desc->limit1 = (size >> 16) & 0xF;
- desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
- desc->base3 = PTR_HIGH(addr);
+
+ desc->limit0 = size & 0xFFFF;
+ desc->base0 = PTR_LOW(addr);
+ desc->base1 = PTR_MIDDLE(addr) & 0xFF;
+ desc->type = type;
+ desc->p = 1;
+ desc->limit1 = (size >> 16) & 0xF;
+ desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
+ desc->base3 = PTR_HIGH(addr);
#else
pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
@@ -237,14 +233,16 @@ static inline void native_store_idt(struct desc_ptr *dtr)
static inline unsigned long native_store_tr(void)
{
unsigned long tr;
+
asm volatile("str %0":"=r" (tr));
+
return tr;
}
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
- unsigned int i;
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ unsigned int i;
for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
@@ -313,6 +311,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
+
pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
/*
* does not need to be atomic because it is only done once at
@@ -343,8 +342,9 @@ static inline void alloc_system_vector(int vector)
set_bit(vector, used_vectors);
if (first_system_vector > vector)
first_system_vector = vector;
- } else
+ } else {
BUG();
+ }
}
static inline void alloc_intr_gate(unsigned int n, void *addr)
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index aeff3e89b222..5f55e6962769 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -11,14 +11,14 @@
typedef struct {
void *ldt;
int size;
- struct mutex lock;
- void *vdso;
#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
unsigned short ia32_compat;
#endif
+ struct mutex lock;
+ void *vdso;
} mm_context_t;
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index fd921c3a6841..487055c8c1aa 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -9,8 +9,6 @@
#include <asm/desc.h>
#include <asm/i387.h>
-static inline int arch_prepare_suspend(void) { return 0; }
-
/* image of the saved processor state */
struct saved_context {
u16 es, fs, gs, ss;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 8d942afae681..09b0bf104156 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -9,11 +9,6 @@
#include <asm/desc.h>
#include <asm/i387.h>
-static inline int arch_prepare_suspend(void)
-{
- return 0;
-}
-
/*
* Image of the saved processor state, used by the low level ACPI suspend to
* RAM code and by the low level hibernation code.
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index fb6a625c99bf..593485b38ab3 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -351,10 +351,11 @@
#define __NR_clock_adjtime 343
#define __NR_syncfs 344
#define __NR_sendmmsg 345
+#define __NR_setns 346
#ifdef __KERNEL__
-#define NR_syscalls 346
+#define NR_syscalls 347
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 79f90eb15aad..705bf139288c 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -679,6 +679,8 @@ __SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_sendmmsg 307
__SYSCALL(__NR_sendmmsg, sys_sendmmsg)
+#define __NR_setns 308
+__SYSCALL(__NR_setns, sys_setns)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
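The setns entries added across the architectures above (e.g. number 308 on x86-64, 346 on 32-bit x86) wire up a syscall that moves the calling task into an existing namespace identified by a /proc/<pid>/ns/* file descriptor. A hypothetical userspace sketch, assuming a libc that already exposes a setns() wrapper (otherwise syscall(__NR_setns, fd, nstype) with the numbers above would be used):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Join the network namespace of the process whose pid is argv[1]. */
int main(int argc, char **argv)
{
	char path[64];
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/ns/net", argv[1]);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (setns(fd, CLONE_NEWNET)) { /* 0 would accept any namespace type */
		perror("setns");
		return 1;
	}
	close(fd);
	return 0;
}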
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 130f1eeee5fe..a291c40efd43 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -5,7 +5,7 @@
*
* SGI UV Broadcast Assist Unit definitions
*
- * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_BAU_H
@@ -35,17 +35,20 @@
#define MAX_CPUS_PER_UVHUB 64
#define MAX_CPUS_PER_SOCKET 32
-#define UV_ADP_SIZE 64 /* hardware-provided max. */
-#define UV_CPUS_PER_ACT_STATUS 32 /* hardware-provided max. */
-#define UV_ITEMS_PER_DESCRIPTOR 8
+#define ADP_SZ 64 /* hardware-provided max. */
+#define UV_CPUS_PER_AS 32 /* hardware-provided max. */
+#define ITEMS_PER_DESC 8
/* the 'throttle' to prevent the hardware stay-busy bug */
#define MAX_BAU_CONCURRENT 3
#define UV_ACT_STATUS_MASK 0x3
#define UV_ACT_STATUS_SIZE 2
#define UV_DISTRIBUTION_SIZE 256
#define UV_SW_ACK_NPENDING 8
-#define UV_NET_ENDPOINT_INTD 0x38
-#define UV_DESC_BASE_PNODE_SHIFT 49
+#define UV1_NET_ENDPOINT_INTD 0x38
+#define UV2_NET_ENDPOINT_INTD 0x28
+#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
+ UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
+#define UV_DESC_PSHIFT 49
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
@@ -53,29 +56,64 @@
#define UV_BAU_TUNABLES_FILE "bau_tunables"
#define WHITESPACE " \t\n"
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
-#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x0000000009UL
+#define cpubit_isset(cpu, bau_local_cpumask) \
+ test_bit((cpu), (bau_local_cpumask).bits)
+
/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
-#define BAU_MISC_CONTROL_MULT_MASK 3
+/*
+ * UV2: Bit 19 selects between
+ * (0): 10 microsecond timebase and
+ * (1): 80 microseconds
+ * we're using 655us, similar to UV1: 65 units of 10us
+ */
+#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
+#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (65*10UL)
+
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
+ UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
+ UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
-#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
+#define BAU_MISC_CONTROL_MULT_MASK 3
+
+#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
/* [30:28] URGENCY_7 an index into a table of times */
-#define BAU_URGENCY_7_SHIFT 28
-#define BAU_URGENCY_7_MASK 7
+#define BAU_URGENCY_7_SHIFT 28
+#define BAU_URGENCY_7_MASK 7
-#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
+#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
/* [45:40] BAU - BAU transaction timeout select - a multiplier */
-#define BAU_TRANS_SHIFT 40
-#define BAU_TRANS_MASK 0x3f
+#define BAU_TRANS_SHIFT 40
+#define BAU_TRANS_MASK 0x3f
+
+/*
+ * shorten some awkward names
+ */
+#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
+#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
+#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
+#define write_gmmr uv_write_global_mmr64
+#define write_lmmr uv_write_local_mmr
+#define read_lmmr uv_read_local_mmr
+#define read_gmmr uv_read_global_mmr64
/*
* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
*/
-#define DESC_STATUS_IDLE 0
-#define DESC_STATUS_ACTIVE 1
-#define DESC_STATUS_DESTINATION_TIMEOUT 2
-#define DESC_STATUS_SOURCE_TIMEOUT 3
+#define DS_IDLE 0
+#define DS_ACTIVE 1
+#define DS_DESTINATION_TIMEOUT 2
+#define DS_SOURCE_TIMEOUT 3
+/*
+ * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
+ * values 1 and 5 will not occur
+ */
+#define UV2H_DESC_IDLE 0
+#define UV2H_DESC_DEST_TIMEOUT 2
+#define UV2H_DESC_DEST_STRONG_NACK 3
+#define UV2H_DESC_BUSY 4
+#define UV2H_DESC_SOURCE_TIMEOUT 6
+#define UV2H_DESC_DEST_PUT_ERR 7
/*
* delay for 'plugged' timeout retries, in microseconds
@@ -86,15 +124,24 @@
* threshholds at which to use IPI to free resources
*/
/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
-#define PLUGSB4RESET 100
+#define PLUGSB4RESET 100
/* after this many consecutive timeouts, use IPI to release resources */
-#define TIMEOUTSB4RESET 1
+#define TIMEOUTSB4RESET 1
/* at this number uses of IPI to release resources, giveup the request */
-#define IPI_RESET_LIMIT 1
+#define IPI_RESET_LIMIT 1
/* after this # consecutive successes, bump up the throttle if it was lowered */
-#define COMPLETE_THRESHOLD 5
+#define COMPLETE_THRESHOLD 5
+
+#define UV_LB_SUBNODEID 0x10
-#define UV_LB_SUBNODEID 0x10
+/* these two are the same for UV1 and UV2: */
+#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
+/* 4 bits of software ack period */
+#define UV2_ACK_MASK 0x7UL
+#define UV2_ACK_UNITS_SHFT 3
+#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
+#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
/*
* number of entries in the destination side payload queue
@@ -115,9 +162,16 @@
/*
* tuning the action when the numalink network is extremely delayed
*/
-#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in microseconds */
-#define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */
-#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */
+#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in
+ microseconds */
+#define CONGESTED_REPS 10 /* long delays averaged over
+ this many broadcasts */
+#define CONGESTED_PERIOD 30 /* time for the bau to be
+ disabled, in seconds */
+/* see msg_type: */
+#define MSG_NOOP 0
+#define MSG_REGULAR 1
+#define MSG_RETRY 2
/*
* Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -129,8 +183,8 @@
* 'base_dest_nasid' field of the header corresponds to the
* destination nodeID associated with that specified bit.
*/
-struct bau_target_uvhubmask {
- unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
+struct bau_targ_hubmask {
+ unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};
/*
@@ -139,7 +193,7 @@ struct bau_target_uvhubmask {
* enough bits for max. cpu's per uvhub)
*/
struct bau_local_cpumask {
- unsigned long bits;
+ unsigned long bits;
};
/*
@@ -160,14 +214,14 @@ struct bau_local_cpumask {
* The payload is software-defined for INTD transactions
*/
struct bau_msg_payload {
- unsigned long address; /* signifies a page or all TLB's
- of the cpu */
+ unsigned long address; /* signifies a page or all
+ TLB's of the cpu */
/* 64 bits */
- unsigned short sending_cpu; /* filled in by sender */
+ unsigned short sending_cpu; /* filled in by sender */
/* 16 bits */
- unsigned short acknowledge_count;/* filled in by destination */
+ unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits */
- unsigned int reserved1:32; /* not usable */
+ unsigned int reserved1:32; /* not usable */
};
@@ -176,93 +230,96 @@ struct bau_msg_payload {
* see table 4.2.3.0.1 in broacast_assist spec.
*/
struct bau_msg_header {
- unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
+ unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
- unsigned int base_dest_nasid:15; /* nasid of the */
- /* bits 20:6 */ /* first bit in uvhub map */
- unsigned int command:8; /* message type */
+ unsigned int base_dest_nasid:15; /* nasid of the first bit */
+ /* bits 20:6 */ /* in uvhub map */
+ unsigned int command:8; /* message type */
/* bits 28:21 */
- /* 0x38: SN3net EndPoint Message */
- unsigned int rsvd_1:3; /* must be zero */
+ /* 0x38: SN3net EndPoint Message */
+ unsigned int rsvd_1:3; /* must be zero */
/* bits 31:29 */
- /* int will align on 32 bits */
- unsigned int rsvd_2:9; /* must be zero */
+ /* int will align on 32 bits */
+ unsigned int rsvd_2:9; /* must be zero */
/* bits 40:32 */
- /* Suppl_A is 56-41 */
- unsigned int sequence:16;/* message sequence number */
- /* bits 56:41 */ /* becomes bytes 16-17 of msg */
- /* Address field (96:57) is never used as an
- address (these are address bits 42:3) */
-
- unsigned int rsvd_3:1; /* must be zero */
+ /* Suppl_A is 56-41 */
+ unsigned int sequence:16; /* message sequence number */
+ /* bits 56:41 */ /* becomes bytes 16-17 of msg */
+ /* Address field (96:57) is
+ never used as an address
+ (these are address bits
+ 42:3) */
+
+ unsigned int rsvd_3:1; /* must be zero */
/* bit 57 */
- /* address bits 27:4 are payload */
+ /* address bits 27:4 are payload */
/* these next 24 (58-81) bits become bytes 12-14 of msg */
-
/* bits 65:58 land in byte 12 */
- unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
+ unsigned int replied_to:1; /* sent as 0 by the source to
+ byte 12 */
/* bit 58 */
- unsigned int msg_type:3; /* software type of the message*/
+ unsigned int msg_type:3; /* software type of the
+ message */
/* bits 61:59 */
- unsigned int canceled:1; /* message canceled, resource to be freed*/
+ unsigned int canceled:1; /* message canceled, resource
+ is to be freed*/
/* bit 62 */
- unsigned int payload_1a:1;/* not currently used */
+ unsigned int payload_1a:1; /* not currently used */
/* bit 63 */
- unsigned int payload_1b:2;/* not currently used */
+ unsigned int payload_1b:2; /* not currently used */
/* bits 65:64 */
/* bits 73:66 land in byte 13 */
- unsigned int payload_1ca:6;/* not currently used */
+ unsigned int payload_1ca:6; /* not currently used */
/* bits 71:66 */
- unsigned int payload_1c:2;/* not currently used */
+ unsigned int payload_1c:2; /* not currently used */
/* bits 73:72 */
/* bits 81:74 land in byte 14 */
- unsigned int payload_1d:6;/* not currently used */
+ unsigned int payload_1d:6; /* not currently used */
/* bits 79:74 */
- unsigned int payload_1e:2;/* not currently used */
+ unsigned int payload_1e:2; /* not currently used */
/* bits 81:80 */
- unsigned int rsvd_4:7; /* must be zero */
+ unsigned int rsvd_4:7; /* must be zero */
/* bits 88:82 */
- unsigned int sw_ack_flag:1;/* software acknowledge flag */
+ unsigned int swack_flag:1; /* software acknowledge flag */
/* bit 89 */
- /* INTD trasactions at destination are to
- wait for software acknowledge */
- unsigned int rsvd_5:6; /* must be zero */
+ /* INTD transactions at
+ destination are to wait for
+ software acknowledge */
+ unsigned int rsvd_5:6; /* must be zero */
/* bits 95:90 */
- unsigned int rsvd_6:5; /* must be zero */
+ unsigned int rsvd_6:5; /* must be zero */
/* bits 100:96 */
- unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
+ unsigned int int_both:1; /* if 1, interrupt both sockets
+ on the uvhub */
/* bit 101*/
- unsigned int fairness:3;/* usually zero */
+ unsigned int fairness:3; /* usually zero */
/* bits 104:102 */
- unsigned int multilevel:1; /* multi-level multicast format */
+ unsigned int multilevel:1; /* multi-level multicast
+ format */
/* bit 105 */
- /* 0 for TLB: endpoint multi-unicast messages */
- unsigned int chaining:1;/* next descriptor is part of this activation*/
+ /* 0 for TLB: endpoint multi-unicast messages */
+ unsigned int chaining:1; /* next descriptor is part of
+ this activation*/
/* bit 106 */
- unsigned int rsvd_7:21; /* must be zero */
+ unsigned int rsvd_7:21; /* must be zero */
/* bits 127:107 */
};
-/* see msg_type: */
-#define MSG_NOOP 0
-#define MSG_REGULAR 1
-#define MSG_RETRY 2
-
/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
* Should be 64 bytes
*/
struct bau_desc {
- struct bau_target_uvhubmask distribution;
+ struct bau_targ_hubmask distribution;
/*
* message template, consisting of header and payload:
*/
- struct bau_msg_header header;
- struct bau_msg_payload payload;
+ struct bau_msg_header header;
+ struct bau_msg_payload payload;
};
/*
* -payload-- ---------header------
@@ -281,59 +338,51 @@ struct bau_desc {
* are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
* bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
* (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
- * sw_ack_vector and payload_2)
+ * swack_vec and payload_2)
* "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
* Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
* operation."
*/
-struct bau_payload_queue_entry {
- unsigned long address; /* signifies a page or all TLB's
- of the cpu */
+struct bau_pq_entry {
+ unsigned long address; /* signifies a page or all TLB's
+ of the cpu */
/* 64 bits, bytes 0-7 */
-
- unsigned short sending_cpu; /* cpu that sent the message */
+ unsigned short sending_cpu; /* cpu that sent the message */
/* 16 bits, bytes 8-9 */
-
- unsigned short acknowledge_count; /* filled in by destination */
+ unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits, bytes 10-11 */
-
/* these next 3 bytes come from bits 58-81 of the message header */
- unsigned short replied_to:1; /* sent as 0 by the source */
- unsigned short msg_type:3; /* software message type */
- unsigned short canceled:1; /* sent as 0 by the source */
- unsigned short unused1:3; /* not currently using */
+ unsigned short replied_to:1; /* sent as 0 by the source */
+ unsigned short msg_type:3; /* software message type */
+ unsigned short canceled:1; /* sent as 0 by the source */
+ unsigned short unused1:3; /* not currently using */
/* byte 12 */
-
- unsigned char unused2a; /* not currently using */
+ unsigned char unused2a; /* not currently using */
/* byte 13 */
- unsigned char unused2; /* not currently using */
+ unsigned char unused2; /* not currently using */
/* byte 14 */
-
- unsigned char sw_ack_vector; /* filled in by the hardware */
+ unsigned char swack_vec; /* filled in by the hardware */
/* byte 15 (bits 127:120) */
-
- unsigned short sequence; /* message sequence number */
+ unsigned short sequence; /* message sequence number */
/* bytes 16-17 */
- unsigned char unused4[2]; /* not currently using bytes 18-19 */
+ unsigned char unused4[2]; /* not currently using bytes 18-19 */
/* bytes 18-19 */
-
- int number_of_cpus; /* filled in at destination */
+ int number_of_cpus; /* filled in at destination */
/* 32 bits, bytes 20-23 (aligned) */
-
- unsigned char unused5[8]; /* not using */
+ unsigned char unused5[8]; /* not using */
/* bytes 24-31 */
};
struct msg_desc {
- struct bau_payload_queue_entry *msg;
- int msg_slot;
- int sw_ack_slot;
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
+ struct bau_pq_entry *msg;
+ int msg_slot;
+ int swack_slot;
+ struct bau_pq_entry *queue_first;
+ struct bau_pq_entry *queue_last;
};
struct reset_args {
- int sender;
+ int sender;
};
/*
@@ -341,112 +390,226 @@ struct reset_args {
*/
struct ptc_stats {
/* sender statistics */
- unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
- unsigned long s_requestor; /* number of shootdown requests */
- unsigned long s_stimeout; /* source side timeouts */
- unsigned long s_dtimeout; /* destination side timeouts */
- unsigned long s_time; /* time spent in sending side */
- unsigned long s_retriesok; /* successful retries */
- unsigned long s_ntargcpu; /* total number of cpu's targeted */
- unsigned long s_ntargself; /* times the sending cpu was targeted */
- unsigned long s_ntarglocals; /* targets of cpus on the local blade */
- unsigned long s_ntargremotes; /* targets of cpus on remote blades */
- unsigned long s_ntarglocaluvhub; /* targets of the local hub */
- unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
- unsigned long s_ntarguvhub; /* total number of uvhubs targeted */
- unsigned long s_ntarguvhub16; /* number of times target hubs >= 16*/
- unsigned long s_ntarguvhub8; /* number of times target hubs >= 8 */
- unsigned long s_ntarguvhub4; /* number of times target hubs >= 4 */
- unsigned long s_ntarguvhub2; /* number of times target hubs >= 2 */
- unsigned long s_ntarguvhub1; /* number of times target hubs == 1 */
- unsigned long s_resets_plug; /* ipi-style resets from plug state */
- unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
- unsigned long s_busy; /* status stayed busy past s/w timer */
- unsigned long s_throttles; /* waits in throttle */
- unsigned long s_retry_messages; /* retry broadcasts */
- unsigned long s_bau_reenabled; /* for bau enable/disable */
- unsigned long s_bau_disabled; /* for bau enable/disable */
+ unsigned long s_giveup; /* number of fall backs to
+ IPI-style flushes */
+ unsigned long s_requestor; /* number of shootdown
+ requests */
+ unsigned long s_stimeout; /* source side timeouts */
+ unsigned long s_dtimeout; /* destination side timeouts */
+ unsigned long s_time; /* time spent in sending side */
+ unsigned long s_retriesok; /* successful retries */
+ unsigned long s_ntargcpu; /* total number of cpu's
+ targeted */
+ unsigned long s_ntargself; /* times the sending cpu was
+ targeted */
+ unsigned long s_ntarglocals; /* targets of cpus on the local
+ blade */
+ unsigned long s_ntargremotes; /* targets of cpus on remote
+ blades */
+ unsigned long s_ntarglocaluvhub; /* targets of the local hub */
+ unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
+ unsigned long s_ntarguvhub; /* total number of uvhubs
+ targeted */
+ unsigned long s_ntarguvhub16; /* number of times target
+ hubs >= 16*/
+ unsigned long s_ntarguvhub8; /* number of times target
+ hubs >= 8 */
+ unsigned long s_ntarguvhub4; /* number of times target
+ hubs >= 4 */
+ unsigned long s_ntarguvhub2; /* number of times target
+ hubs >= 2 */
+ unsigned long s_ntarguvhub1; /* number of times target
+ hubs == 1 */
+ unsigned long s_resets_plug; /* ipi-style resets from plug
+ state */
+ unsigned long s_resets_timeout; /* ipi-style resets from
+ timeouts */
+ unsigned long s_busy; /* status stayed busy past
+ s/w timer */
+ unsigned long s_throttles; /* waits in throttle */
+ unsigned long s_retry_messages; /* retry broadcasts */
+ unsigned long s_bau_reenabled; /* for bau enable/disable */
+ unsigned long s_bau_disabled; /* for bau enable/disable */
/* destination statistics */
- unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
- unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
- unsigned long d_multmsg; /* interrupts with multiple messages */
- unsigned long d_nomsg; /* interrupts with no message */
- unsigned long d_time; /* time spent on destination side */
- unsigned long d_requestee; /* number of messages processed */
- unsigned long d_retries; /* number of retry messages processed */
- unsigned long d_canceled; /* number of messages canceled by retries */
- unsigned long d_nocanceled; /* retries that found nothing to cancel */
- unsigned long d_resets; /* number of ipi-style requests processed */
- unsigned long d_rcanceled; /* number of messages canceled by resets */
+ unsigned long d_alltlb; /* times all tlb's on this
+ cpu were flushed */
+ unsigned long d_onetlb; /* times just one tlb on this
+ cpu was flushed */
+ unsigned long d_multmsg; /* interrupts with multiple
+ messages */
+ unsigned long d_nomsg; /* interrupts with no message */
+ unsigned long d_time; /* time spent on destination
+ side */
+ unsigned long d_requestee; /* number of messages
+ processed */
+ unsigned long d_retries; /* number of retry messages
+ processed */
+ unsigned long d_canceled; /* number of messages canceled
+ by retries */
+ unsigned long d_nocanceled; /* retries that found nothing
+ to cancel */
+ unsigned long d_resets; /* number of ipi-style requests
+ processed */
+ unsigned long d_rcanceled; /* number of messages canceled
+ by resets */
+};
+
+struct tunables {
+ int *tunp;
+ int deflt;
};
struct hub_and_pnode {
- short uvhub;
- short pnode;
+ short uvhub;
+ short pnode;
};
+
+struct socket_desc {
+ short num_cpus;
+ short cpu_number[MAX_CPUS_PER_SOCKET];
+};
+
+struct uvhub_desc {
+ unsigned short socket_mask;
+ short num_cpus;
+ short uvhub;
+ short pnode;
+ struct socket_desc socket[2];
+};
+
/*
* one per-cpu; to locate the software tables
*/
struct bau_control {
- struct bau_desc *descriptor_base;
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
- struct bau_payload_queue_entry *bau_msg_head;
- struct bau_control *uvhub_master;
- struct bau_control *socket_master;
- struct ptc_stats *statp;
- unsigned long timeout_interval;
- unsigned long set_bau_on_time;
- atomic_t active_descriptor_count;
- int plugged_tries;
- int timeout_tries;
- int ipi_attempts;
- int conseccompletes;
- int baudisabled;
- int set_bau_off;
- short cpu;
- short osnode;
- short uvhub_cpu;
- short uvhub;
- short cpus_in_socket;
- short cpus_in_uvhub;
- short partition_base_pnode;
- unsigned short message_number;
- unsigned short uvhub_quiesce;
- short socket_acknowledge_count[DEST_Q_SIZE];
- cycles_t send_message;
- spinlock_t uvhub_lock;
- spinlock_t queue_lock;
+ struct bau_desc *descriptor_base;
+ struct bau_pq_entry *queue_first;
+ struct bau_pq_entry *queue_last;
+ struct bau_pq_entry *bau_msg_head;
+ struct bau_control *uvhub_master;
+ struct bau_control *socket_master;
+ struct ptc_stats *statp;
+ unsigned long timeout_interval;
+ unsigned long set_bau_on_time;
+ atomic_t active_descriptor_count;
+ int plugged_tries;
+ int timeout_tries;
+ int ipi_attempts;
+ int conseccompletes;
+ int baudisabled;
+ int set_bau_off;
+ short cpu;
+ short osnode;
+ short uvhub_cpu;
+ short uvhub;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ short partition_base_pnode;
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+ cycles_t send_message;
+ spinlock_t uvhub_lock;
+ spinlock_t queue_lock;
/* tunables */
- int max_bau_concurrent;
- int max_bau_concurrent_constant;
- int plugged_delay;
- int plugsb4reset;
- int timeoutsb4reset;
- int ipi_reset_limit;
- int complete_threshold;
- int congested_response_us;
- int congested_reps;
- int congested_period;
- cycles_t period_time;
- long period_requests;
- struct hub_and_pnode *target_hub_and_pnode;
+ int max_concurr;
+ int max_concurr_const;
+ int plugged_delay;
+ int plugsb4reset;
+ int timeoutsb4reset;
+ int ipi_reset_limit;
+ int complete_threshold;
+ int cong_response_us;
+ int cong_reps;
+ int cong_period;
+ cycles_t period_time;
+ long period_requests;
+ struct hub_and_pnode *thp;
};
-static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
+static unsigned long read_mmr_uv2_status(void)
+{
+ return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
+}
+
+static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
+}
+
+static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
+}
+
+static void write_mmr_activation(unsigned long index)
+{
+ write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+}
+
+static void write_gmmr_activation(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
+}
+
+static void write_mmr_payload_first(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
+}
+
+static void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
+}
+
+static void write_mmr_payload_last(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
+}
+
+static void write_mmr_misc_control(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+}
+
+static unsigned long read_mmr_misc_control(int pnode)
+{
+ return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
+}
+
+static void write_mmr_sw_ack(unsigned long mr)
+{
+ uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
+static unsigned long read_mmr_sw_ack(void)
+{
+ return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static unsigned long read_gmmr_sw_ack(int pnode)
+{
+ return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static void write_mmr_data_config(int pnode, unsigned long mr)
+{
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
+}
+
+static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp)
{
return constant_test_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
+static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
{
__set_bit(pnode, &dstp->bits[0]);
}
-static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
+static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp,
int nbits)
{
bitmap_zero(&dstp->bits[0], nbits);
}
-static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
+static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp)
{
return bitmap_weight((unsigned long *)&dstp->bits[0],
UV_DISTRIBUTION_SIZE);
@@ -457,9 +620,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
bitmap_zero(&dstp->bits, nbits);
}
-#define cpubit_isset(cpu, bau_local_cpumask) \
- test_bit((cpu), (bau_local_cpumask).bits)
-
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
@@ -467,7 +627,7 @@ struct atomic_short {
short counter;
};
-/**
+/*
* atomic_read_short - read a short atomic variable
* @v: pointer of type atomic_short
*
@@ -478,14 +638,14 @@ static inline int atomic_read_short(const struct atomic_short *v)
return v->counter;
}
-/**
- * atomic_add_short_return - add and return a short int
+/*
+ * atom_asr - add and return a short int
* @i: short value to add
* @v: pointer of type atomic_short
*
* Atomically adds @i to @v and returns @i + @v
*/
-static inline int atomic_add_short_return(short i, struct atomic_short *v)
+static inline int atom_asr(short i, struct atomic_short *v)
{
short __i = i;
asm volatile(LOCK_PREFIX "xaddw %0, %1"
@@ -494,4 +654,26 @@ static inline int atomic_add_short_return(short i, struct atomic_short *v)
return i + __i;
}
+/*
+ * conditionally add 1 to *v, unless *v is >= u
+ * return 0 if we cannot add 1 to *v because it is >= u
+ * return 1 if we can add 1 to *v because it is < u
+ * the add is atomic
+ *
+ * This is close to atomic_add_unless(), but this allows the 'u' value
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+{
+ spin_lock(lock);
+ if (atomic_read(v) >= u) {
+ spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+ spin_unlock(lock);
+ return 1;
+}
+
#endif /* _ASM_X86_UV_UV_BAU_H */
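The atomic_inc_unless_ge() helper added at the end of uv_bau.h is a throttling primitive: a caller may take another slot only while the count is below a limit, and because the comparison is done under the spinlock the limit may be lowered below the current count at runtime, which atomic_add_unless() cannot express. A standalone userspace analogue of the same semantics, purely illustrative, using a pthread mutex in place of the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Increment *v only if it is still below u; return 1 on success, 0 otherwise. */
static int inc_unless_ge(int *v, int u)
{
	int ok = 0;

	pthread_mutex_lock(&lock);
	if (*v < u) {
		(*v)++;
		ok = 1;
	}
	pthread_mutex_unlock(&lock);
	return ok;
}

int main(void)
{
	int active = 0;
	int limit = 3;

	while (inc_unless_ge(&active, limit))
		; /* take slots until the limit is reached */
	printf("stopped at %d (limit %d)\n", active, limit);
	return 0;
}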
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 4298002d0c83..f26544a15214 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -77,8 +77,9 @@
*
* 1111110000000000
* 5432109876543210
- * pppppppppplc0cch Nehalem-EX
- * ppppppppplcc0cch Westmere-EX
+ * pppppppppplc0cch Nehalem-EX (12 bits in hdw reg)
+ * ppppppppplcc0cch Westmere-EX (12 bits in hdw reg)
+ * pppppppppppcccch SandyBridge (15 bits in hdw reg)
* sssssssssss
*
* p = pnode bits
@@ -87,7 +88,7 @@
* h = hyperthread
* s = bits that are in the SOCKET_ID CSR
*
- * Note: Processor only supports 12 bits in the APICID register. The ACPI
+ * Note: Processor may support fewer bits in the APICID register. The ACPI
* tables hold all 16 bits. Software needs to be aware of this.
*
* Unless otherwise specified, all references to APICID refer to
@@ -138,6 +139,8 @@ struct uv_hub_info_s {
unsigned long global_mmr_base;
unsigned long gpa_mask;
unsigned int gnode_extra;
+ unsigned char hub_revision;
+ unsigned char apic_pnode_shift;
unsigned long gnode_upper;
unsigned long lowmem_remap_top;
unsigned long lowmem_remap_base;
@@ -149,13 +152,31 @@ struct uv_hub_info_s {
unsigned char m_val;
unsigned char n_val;
struct uv_scir_s scir;
- unsigned char apic_pnode_shift;
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+/*
+ * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
+ * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
+ * This is a software convention - NOT the hardware revision numbers in
+ * the hub chip.
+ */
+#define UV1_HUB_REVISION_BASE 1
+#define UV2_HUB_REVISION_BASE 3
+
+static inline int is_uv1_hub(void)
+{
+ return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
+}
+
+static inline int is_uv2_hub(void)
+{
+ return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
+}
+
union uvh_apicid {
unsigned long v;
struct uvh_apicid_s {
@@ -180,11 +201,25 @@ union uvh_apicid {
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
-#define UV_LOCAL_MMR_BASE 0xf4000000UL
-#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV1_LOCAL_MMR_BASE 0xf4000000UL
+#define UV1_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV1_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
+#define UV1_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
+
+#define UV2_LOCAL_MMR_BASE 0xfa000000UL
+#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \
+ : UV2_LOCAL_MMR_BASE)
+#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \
+ : UV2_GLOBAL_MMR32_BASE)
+#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+ UV2_LOCAL_MMR_SIZE)
+#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
+ UV2_GLOBAL_MMR32_SIZE)
#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
-#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
-#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
@@ -301,6 +336,17 @@ static inline int uv_apicid_to_pnode(int apicid)
}
/*
+ * Convert an apicid to the socket number on the blade
+ */
+static inline int uv_apicid_to_socket(int apicid)
+{
+ if (is_uv1_hub())
+ return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
+ else
+ return 0;
+}
+
+/*
* Access global MMRs using the low memory MMR32 space. This region supports
* faster MMR access but not all MMRs are accessible in this space.
*/
@@ -519,14 +565,13 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
/*
* Get the minimum revision number of the hub chips within the partition.
- * 1 - initial rev 1.0 silicon
- * 2 - rev 2.0 production silicon
+ * 1 - UV1 rev 1.0 initial silicon
+ * 2 - UV1 rev 2.0 production silicon
+ * 3 - UV2 rev 1.0 initial silicon
*/
static inline int uv_get_min_hub_revision_id(void)
{
- extern int uv_min_hub_revision_id;
-
- return uv_min_hub_revision_id;
+ return uv_hub_info->hub_revision;
}
#endif /* CONFIG_X86_64 */
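The uv_hub.h changes above introduce a software hub_revision field plus the is_uv1_hub()/is_uv2_hub() predicates, and then make address constants such as UV_LOCAL_MMR_BASE pick the right per-generation value at run time. A tiny standalone sketch of that pattern (illustration only, reusing constants from the diff, with hub_revision hard-coded where the kernel would probe the hub chip):

#include <stdio.h>

#define UV1_HUB_REVISION_BASE 1
#define UV2_HUB_REVISION_BASE 3

#define UV1_LOCAL_MMR_BASE 0xf4000000UL
#define UV2_LOCAL_MMR_BASE 0xfa000000UL

static int hub_revision = 3; /* would be set from the detected hub chip */

static int is_uv1_hub(void)
{
	return hub_revision < UV2_HUB_REVISION_BASE;
}

#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : UV2_LOCAL_MMR_BASE)

int main(void)
{
	printf("local MMR base: %#lx\n", UV_LOCAL_MMR_BASE);
	return 0;
}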
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index f5bb64a823d7..4be52c863448 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -11,13 +11,64 @@
#ifndef _ASM_X86_UV_UV_MMRS_H
#define _ASM_X86_UV_UV_MMRS_H
+/*
+ * This file contains MMR definitions for both UV1 & UV2 hubs.
+ *
+ * In general, MMR addresses and structures are identical on both hubs.
+ * These MMRs are identified as:
+ * #define UVH_xxx <address>
+ * union uvh_xxx {
+ * unsigned long v;
+ * struct uvh_int_cmpd_s {
+ * } s;
+ * };
+ *
+ * If the MMR exists on both hub types but has different addresses or
+ * contents, the MMR definition is similar to:
+ * #define UV1H_xxx <uv1 address>
+ * #define UV2H_xxx <uv2 address>
+ * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx)
+ * union uvh_xxx {
+ * unsigned long v;
+ * struct uv1h_int_cmpd_s { (Common fields only)
+ * } s;
+ * struct uv1h_int_cmpd_s { (Full UV1 definition)
+ * } s1;
+ * struct uv2h_int_cmpd_s { (Full UV2 definition)
+ * } s2;
+ * };
+ *
+ * Only essential differences are enumerated. For example, if the address is
+ * the same for both UV1 & UV2, only a single #define is generated. Likewise,
+ * if the contents are the same for both hubs, only the "s" structure is
+ * generated.
+ *
+ * If the MMR exists on ONLY 1 type of hub, no generic definition is
+ * generated:
+ * #define UVnH_xxx <uvn address>
+ * union uvnh_xxx {
+ * unsigned long v;
+ * struct uvh_int_cmpd_s {
+ * } sn;
+ * };
+ */
+
#define UV_MMR_ENABLE (1UL << 63)
+#define UV1_HUB_PART_NUMBER 0x88a5
+#define UV2_HUB_PART_NUMBER 0x8eb8
+
+/* Compat: if this #define is present, UV headers support UV2 */
+#define UV2_HUB_IS_SUPPORTED 1
+
+/* KABI compat: if this #define is present, KABI hacks are present */
+#define UV2_HUB_KABI_HACKS 1
+
/* ========================================================================= */
/* UVH_BAU_DATA_BROADCAST */
/* ========================================================================= */
#define UVH_BAU_DATA_BROADCAST 0x61688UL
-#define UVH_BAU_DATA_BROADCAST_32 0x0440
+#define UVH_BAU_DATA_BROADCAST_32 0x440
#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
@@ -34,7 +85,7 @@ union uvh_bau_data_broadcast_u {
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
#define UVH_BAU_DATA_CONFIG 0x61680UL
-#define UVH_BAU_DATA_CONFIG_32 0x0438
+#define UVH_BAU_DATA_CONFIG_32 0x438
#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
@@ -73,125 +124,245 @@ union uvh_bau_data_config_u {
/* UVH_EVENT_OCCURRED0 */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0 0x70000UL
-#define UVH_EVENT_OCCURRED0_32 0x005e8
-
-#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
-#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
-#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
-#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
-#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
-#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
-#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
-#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
-#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
-#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
-#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
-#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
-#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
-#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
-#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
-#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
-#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
-#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
-#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
-#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
-#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
-#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
-#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
-#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
-#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
-#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
-#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
-#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
-#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
-#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
-#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
-#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
-#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
-#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
-#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
-#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
-#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
-#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
-#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
-#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
-#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
-#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
-#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
-#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
-#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
-#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
-#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
-#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
-#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
-#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
-#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
-#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
-#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
-#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
-#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
-#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
-#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
-#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
-#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
-#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
-#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
-#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+#define UVH_EVENT_OCCURRED0_32 0x5e8
+
+#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
+#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+
+#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
+#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
+#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
+#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
+#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
+#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
+#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
+#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
+#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
+#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+
union uvh_event_occurred0_u {
unsigned long v;
- struct uvh_event_occurred0_s {
+ struct uv1h_event_occurred0_s {
unsigned long lb_hcerr : 1; /* RW, W1C */
unsigned long gr0_hcerr : 1; /* RW, W1C */
unsigned long gr1_hcerr : 1; /* RW, W1C */
@@ -250,14 +421,76 @@ union uvh_event_occurred0_u {
unsigned long bau_data : 1; /* RW, W1C */
unsigned long power_management_req : 1; /* RW, W1C */
unsigned long rsvd_57_63 : 7; /* */
- } s;
+ } s1;
+ struct uv2h_event_occurred0_s {
+ unsigned long lb_hcerr : 1; /* RW */
+ unsigned long qp_hcerr : 1; /* RW */
+ unsigned long rh_hcerr : 1; /* RW */
+ unsigned long lh0_hcerr : 1; /* RW */
+ unsigned long lh1_hcerr : 1; /* RW */
+ unsigned long gr0_hcerr : 1; /* RW */
+ unsigned long gr1_hcerr : 1; /* RW */
+ unsigned long ni0_hcerr : 1; /* RW */
+ unsigned long ni1_hcerr : 1; /* RW */
+ unsigned long lb_aoerr0 : 1; /* RW */
+ unsigned long qp_aoerr0 : 1; /* RW */
+ unsigned long rh_aoerr0 : 1; /* RW */
+ unsigned long lh0_aoerr0 : 1; /* RW */
+ unsigned long lh1_aoerr0 : 1; /* RW */
+ unsigned long gr0_aoerr0 : 1; /* RW */
+ unsigned long gr1_aoerr0 : 1; /* RW */
+ unsigned long xb_aoerr0 : 1; /* RW */
+ unsigned long rt_aoerr0 : 1; /* RW */
+ unsigned long ni0_aoerr0 : 1; /* RW */
+ unsigned long ni1_aoerr0 : 1; /* RW */
+ unsigned long lb_aoerr1 : 1; /* RW */
+ unsigned long qp_aoerr1 : 1; /* RW */
+ unsigned long rh_aoerr1 : 1; /* RW */
+ unsigned long lh0_aoerr1 : 1; /* RW */
+ unsigned long lh1_aoerr1 : 1; /* RW */
+ unsigned long gr0_aoerr1 : 1; /* RW */
+ unsigned long gr1_aoerr1 : 1; /* RW */
+ unsigned long xb_aoerr1 : 1; /* RW */
+ unsigned long rt_aoerr1 : 1; /* RW */
+ unsigned long ni0_aoerr1 : 1; /* RW */
+ unsigned long ni1_aoerr1 : 1; /* RW */
+ unsigned long system_shutdown_int : 1; /* RW */
+ unsigned long lb_irq_int_0 : 1; /* RW */
+ unsigned long lb_irq_int_1 : 1; /* RW */
+ unsigned long lb_irq_int_2 : 1; /* RW */
+ unsigned long lb_irq_int_3 : 1; /* RW */
+ unsigned long lb_irq_int_4 : 1; /* RW */
+ unsigned long lb_irq_int_5 : 1; /* RW */
+ unsigned long lb_irq_int_6 : 1; /* RW */
+ unsigned long lb_irq_int_7 : 1; /* RW */
+ unsigned long lb_irq_int_8 : 1; /* RW */
+ unsigned long lb_irq_int_9 : 1; /* RW */
+ unsigned long lb_irq_int_10 : 1; /* RW */
+ unsigned long lb_irq_int_11 : 1; /* RW */
+ unsigned long lb_irq_int_12 : 1; /* RW */
+ unsigned long lb_irq_int_13 : 1; /* RW */
+ unsigned long lb_irq_int_14 : 1; /* RW */
+ unsigned long lb_irq_int_15 : 1; /* RW */
+ unsigned long l1_nmi_int : 1; /* RW */
+ unsigned long stop_clock : 1; /* RW */
+ unsigned long asic_to_l1 : 1; /* RW */
+ unsigned long l1_to_asic : 1; /* RW */
+ unsigned long la_seq_trigger : 1; /* RW */
+ unsigned long ipi_int : 1; /* RW */
+ unsigned long extio_int0 : 1; /* RW */
+ unsigned long extio_int1 : 1; /* RW */
+ unsigned long extio_int2 : 1; /* RW */
+ unsigned long extio_int3 : 1; /* RW */
+ unsigned long profile_int : 1; /* RW */
+ unsigned long rsvd_59_63 : 5; /* */
+ } s2;
};
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0_ALIAS */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
@@ -432,8 +665,16 @@ union uvh_int_cmpb_u {
/* ========================================================================= */
#define UVH_INT_CMPC 0x22100UL
-#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT (is_uv1_hub() ? \
+ UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT : \
+ UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT)
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UV2H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK (is_uv1_hub() ? \
+ UV1H_INT_CMPC_REAL_TIME_CMPC_MASK : \
+ UV2H_INT_CMPC_REAL_TIME_CMPC_MASK)
union uvh_int_cmpc_u {
unsigned long v;
@@ -448,8 +689,16 @@ union uvh_int_cmpc_u {
/* ========================================================================= */
#define UVH_INT_CMPD 0x22180UL
-#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT (is_uv1_hub() ? \
+ UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT : \
+ UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT)
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UV2H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK (is_uv1_hub() ? \
+ UV1H_INT_CMPD_REAL_TIME_CMPD_MASK : \
+ UV2H_INT_CMPD_REAL_TIME_CMPD_MASK)
union uvh_int_cmpd_u {
unsigned long v;
@@ -463,7 +712,7 @@ union uvh_int_cmpd_u {
/* UVH_IPI_INT */
/* ========================================================================= */
#define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x0348
+#define UVH_IPI_INT_32 0x348
#define UVH_IPI_INT_VECTOR_SHFT 0
#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
@@ -493,7 +742,7 @@ union uvh_ipi_int_u {
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009c0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -515,7 +764,7 @@ union uvh_lb_bau_intd_payload_queue_first_u {
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009c8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -533,7 +782,7 @@ union uvh_lb_bau_intd_payload_queue_last_u {
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x009d0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -551,7 +800,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0a68
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
@@ -585,6 +834,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+
union uvh_lb_bau_intd_software_acknowledge_u {
unsigned long v;
struct uvh_lb_bau_intd_software_acknowledge_s {
@@ -612,13 +862,13 @@ union uvh_lb_bau_intd_software_acknowledge_u {
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
/* ========================================================================= */
/* UVH_LB_BAU_MISC_CONTROL */
/* ========================================================================= */
#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
@@ -628,8 +878,8 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
@@ -650,8 +900,86 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
union uvh_lb_bau_misc_control_u {
unsigned long v;
@@ -660,7 +988,25 @@ union uvh_lb_bau_misc_control_u {
unsigned long apic_mode : 1; /* RW */
unsigned long force_broadcast : 1; /* RW */
unsigned long force_lock_nop : 1; /* RW */
- unsigned long csi_agent_presence_vector : 3; /* RW */
+ unsigned long qpi_agent_presence_vector : 3; /* RW */
+ unsigned long descriptor_fetch_mode : 1; /* RW */
+ unsigned long enable_intd_soft_ack_mode : 1; /* RW */
+ unsigned long intd_soft_ack_timeout_period : 4; /* RW */
+ unsigned long enable_dual_mapping_mode : 1; /* RW */
+ unsigned long vga_io_port_decode_enable : 1; /* RW */
+ unsigned long vga_io_port_16_bit_decode : 1; /* RW */
+ unsigned long suppress_dest_registration : 1; /* RW */
+ unsigned long programmed_initial_priority : 3; /* RW */
+ unsigned long use_incoming_priority : 1; /* RW */
+ unsigned long enable_programmed_initial_priority : 1; /* RW */
+ unsigned long rsvd_29_63 : 35;
+ } s;
+ struct uv1h_lb_bau_misc_control_s {
+ unsigned long rejection_delay : 8; /* RW */
+ unsigned long apic_mode : 1; /* RW */
+ unsigned long force_broadcast : 1; /* RW */
+ unsigned long force_lock_nop : 1; /* RW */
+ unsigned long qpi_agent_presence_vector : 3; /* RW */
unsigned long descriptor_fetch_mode : 1; /* RW */
unsigned long enable_intd_soft_ack_mode : 1; /* RW */
unsigned long intd_soft_ack_timeout_period : 4; /* RW */
@@ -673,14 +1019,40 @@ union uvh_lb_bau_misc_control_u {
unsigned long enable_programmed_initial_priority : 1; /* RW */
unsigned long rsvd_29_47 : 19; /* */
unsigned long fun : 16; /* RW */
- } s;
+ } s1;
+ struct uv2h_lb_bau_misc_control_s {
+ unsigned long rejection_delay : 8; /* RW */
+ unsigned long apic_mode : 1; /* RW */
+ unsigned long force_broadcast : 1; /* RW */
+ unsigned long force_lock_nop : 1; /* RW */
+ unsigned long qpi_agent_presence_vector : 3; /* RW */
+ unsigned long descriptor_fetch_mode : 1; /* RW */
+ unsigned long enable_intd_soft_ack_mode : 1; /* RW */
+ unsigned long intd_soft_ack_timeout_period : 4; /* RW */
+ unsigned long enable_dual_mapping_mode : 1; /* RW */
+ unsigned long vga_io_port_decode_enable : 1; /* RW */
+ unsigned long vga_io_port_16_bit_decode : 1; /* RW */
+ unsigned long suppress_dest_registration : 1; /* RW */
+ unsigned long programmed_initial_priority : 3; /* RW */
+ unsigned long use_incoming_priority : 1; /* RW */
+ unsigned long enable_programmed_initial_priority : 1; /* RW */
+ unsigned long enable_automatic_apic_mode_selection : 1; /* RW */
+ unsigned long apic_mode_status : 1; /* RO */
+ unsigned long suppress_interrupts_to_self : 1; /* RW */
+ unsigned long enable_lock_based_system_flush : 1; /* RW */
+ unsigned long enable_extended_sb_status : 1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self : 1; /* RW */
+ unsigned long use_legacy_descriptor_formats : 1; /* RW */
+ unsigned long rsvd_36_47 : 12; /* */
+ unsigned long fun : 16; /* RW */
+ } s2;
};
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009a8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
@@ -703,7 +1075,7 @@ union uvh_lb_bau_sb_activation_control_u {
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009b0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -719,7 +1091,7 @@ union uvh_lb_bau_sb_activation_status_0_u {
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009b8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -735,7 +1107,7 @@ union uvh_lb_bau_sb_activation_status_1_u {
/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
/* ========================================================================= */
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009a0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
@@ -754,23 +1126,6 @@ union uvh_lb_bau_sb_descriptor_base_u {
};
/* ========================================================================= */
-/* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */
-/* ========================================================================= */
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0
-
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
-
-union uvh_lb_target_physical_apic_id_mask_u {
- unsigned long v;
- struct uvh_lb_target_physical_apic_id_mask_s {
- unsigned long bit_enables : 32; /* RW */
- unsigned long rsvd_32_63 : 32; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
@@ -785,10 +1140,36 @@ union uvh_lb_target_physical_apic_id_mask_u {
#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
#define UVH_NODE_ID_NODE_ID_SHFT 32
#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
-#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
-#define UVH_NODE_ID_NI_PORT_SHFT 56
-#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+#define UV1H_NODE_ID_FORCE1_SHFT 0
+#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV1H_NODE_ID_REVISION_SHFT 28
+#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV1H_NODE_ID_NODE_ID_SHFT 32
+#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UV1H_NODE_ID_NI_PORT_SHFT 56
+#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+#define UV2H_NODE_ID_FORCE1_SHFT 0
+#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV2H_NODE_ID_REVISION_SHFT 28
+#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV2H_NODE_ID_NODE_ID_SHFT 32
+#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UV2H_NODE_ID_NI_PORT_SHFT 57
+#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
union uvh_node_id_u {
unsigned long v;
@@ -798,12 +1179,31 @@ union uvh_node_id_u {
unsigned long part_number : 16; /* RO */
unsigned long revision : 4; /* RO */
unsigned long node_id : 15; /* RW */
+ unsigned long rsvd_47_63 : 17;
+ } s;
+ struct uv1h_node_id_s {
+ unsigned long force1 : 1; /* RO */
+ unsigned long manufacturer : 11; /* RO */
+ unsigned long part_number : 16; /* RO */
+ unsigned long revision : 4; /* RO */
+ unsigned long node_id : 15; /* RW */
unsigned long rsvd_47 : 1; /* */
unsigned long nodes_per_bit : 7; /* RW */
unsigned long rsvd_55 : 1; /* */
unsigned long ni_port : 4; /* RO */
unsigned long rsvd_60_63 : 4; /* */
- } s;
+ } s1;
+ struct uv2h_node_id_s {
+ unsigned long force1 : 1; /* RO */
+ unsigned long manufacturer : 11; /* RO */
+ unsigned long part_number : 16; /* RO */
+ unsigned long revision : 4; /* RO */
+ unsigned long node_id : 15; /* RW */
+ unsigned long rsvd_47_49 : 3; /* */
+ unsigned long nodes_per_bit : 7; /* RO */
+ unsigned long ni_port : 5; /* RO */
+ unsigned long rsvd_62_63 : 2; /* */
+ } s2;
};
/* ========================================================================= */
@@ -954,18 +1354,38 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
-#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
union uvh_rh_gam_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_config_mmr_s {
unsigned long m_skt : 6; /* RW */
unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_10_63 : 54;
+ } s;
+ struct uv1h_rh_gam_config_mmr_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long n_skt : 4; /* RW */
unsigned long rsvd_10_11: 2; /* */
unsigned long mmiol_cfg : 1; /* RW */
unsigned long rsvd_13_63: 51; /* */
- } s;
+ } s1;
+ struct uv2h_rh_gam_config_mmr_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_10_63: 54; /* */
+ } s2;
};
/* ========================================================================= */
@@ -975,25 +1395,49 @@ union uvh_rh_gam_config_mmr_u {
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_gru_overlay_config_mmr_s {
unsigned long rsvd_0_27: 28; /* */
unsigned long base : 18; /* RW */
+ unsigned long rsvd_46_62 : 17;
+ unsigned long enable : 1; /* RW */
+ } s;
+ struct uv1h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27: 28; /* */
+ unsigned long base : 18; /* RW */
unsigned long rsvd_46_47: 2; /* */
unsigned long gr4 : 1; /* RW */
unsigned long rsvd_49_51: 3; /* */
unsigned long n_gru : 4; /* RW */
unsigned long rsvd_56_62: 7; /* */
unsigned long enable : 1; /* RW */
- } s;
+ } s1;
+ struct uv2h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27: 28; /* */
+ unsigned long base : 18; /* RW */
+ unsigned long rsvd_46_51: 6; /* */
+ unsigned long n_gru : 4; /* RW */
+ unsigned long rsvd_56_62: 7; /* */
+ unsigned long enable : 1; /* RW */
+ } s2;
};
/* ========================================================================= */
@@ -1001,25 +1445,42 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
/* ========================================================================= */
#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_mmioh_overlay_config_mmr_u {
unsigned long v;
- struct uvh_rh_gam_mmioh_overlay_config_mmr_s {
+ struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
unsigned long rsvd_0_29: 30; /* */
unsigned long base : 16; /* RW */
unsigned long m_io : 6; /* RW */
unsigned long n_io : 4; /* RW */
unsigned long rsvd_56_62: 7; /* */
unsigned long enable : 1; /* RW */
- } s;
+ } s1;
+ struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
+ unsigned long rsvd_0_26: 27; /* */
+ unsigned long base : 19; /* RW */
+ unsigned long m_io : 6; /* RW */
+ unsigned long n_io : 4; /* RW */
+ unsigned long rsvd_56_62: 7; /* */
+ unsigned long enable : 1; /* RW */
+ } s2;
};
/* ========================================================================= */
@@ -1029,20 +1490,40 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_mmr_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_mmr_overlay_config_mmr_s {
unsigned long rsvd_0_25: 26; /* */
unsigned long base : 20; /* RW */
+ unsigned long rsvd_46_62 : 17;
+ unsigned long enable : 1; /* RW */
+ } s;
+ struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25: 26; /* */
+ unsigned long base : 20; /* RW */
unsigned long dual_hub : 1; /* RW */
unsigned long rsvd_47_62: 16; /* */
unsigned long enable : 1; /* RW */
- } s;
+ } s1;
+ struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25: 26; /* */
+ unsigned long base : 20; /* RW */
+ unsigned long rsvd_46_62: 17; /* */
+ unsigned long enable : 1; /* RW */
+ } s2;
};
/* ========================================================================= */
@@ -1103,10 +1584,11 @@ union uvh_rtc1_int_config_u {
/* UVH_SCRATCH5 */
/* ========================================================================= */
#define UVH_SCRATCH5 0x2d0200UL
-#define UVH_SCRATCH5_32 0x00778
+#define UVH_SCRATCH5_32 0x778
#define UVH_SCRATCH5_SCRATCH5_SHFT 0
#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
+
union uvh_scratch5_u {
unsigned long v;
struct uvh_scratch5_s {
@@ -1114,4 +1596,154 @@ union uvh_scratch5_u {
} s;
};
+/* ========================================================================= */
+/* UV2H_EVENT_OCCURRED2 */
+/* ========================================================================= */
+#define UV2H_EVENT_OCCURRED2 0x70100UL
+#define UV2H_EVENT_OCCURRED2_32 0xb68
+
+#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+union uv2h_event_occurred2_u {
+ unsigned long v;
+ struct uv2h_event_occurred2_s {
+ unsigned long rtc_0 : 1; /* RW */
+ unsigned long rtc_1 : 1; /* RW */
+ unsigned long rtc_2 : 1; /* RW */
+ unsigned long rtc_3 : 1; /* RW */
+ unsigned long rtc_4 : 1; /* RW */
+ unsigned long rtc_5 : 1; /* RW */
+ unsigned long rtc_6 : 1; /* RW */
+ unsigned long rtc_7 : 1; /* RW */
+ unsigned long rtc_8 : 1; /* RW */
+ unsigned long rtc_9 : 1; /* RW */
+ unsigned long rtc_10 : 1; /* RW */
+ unsigned long rtc_11 : 1; /* RW */
+ unsigned long rtc_12 : 1; /* RW */
+ unsigned long rtc_13 : 1; /* RW */
+ unsigned long rtc_14 : 1; /* RW */
+ unsigned long rtc_15 : 1; /* RW */
+ unsigned long rtc_16 : 1; /* RW */
+ unsigned long rtc_17 : 1; /* RW */
+ unsigned long rtc_18 : 1; /* RW */
+ unsigned long rtc_19 : 1; /* RW */
+ unsigned long rtc_20 : 1; /* RW */
+ unsigned long rtc_21 : 1; /* RW */
+ unsigned long rtc_22 : 1; /* RW */
+ unsigned long rtc_23 : 1; /* RW */
+ unsigned long rtc_24 : 1; /* RW */
+ unsigned long rtc_25 : 1; /* RW */
+ unsigned long rtc_26 : 1; /* RW */
+ unsigned long rtc_27 : 1; /* RW */
+ unsigned long rtc_28 : 1; /* RW */
+ unsigned long rtc_29 : 1; /* RW */
+ unsigned long rtc_30 : 1; /* RW */
+ unsigned long rtc_31 : 1; /* RW */
+ unsigned long rsvd_32_63: 32; /* */
+ } s1;
+};
+
+/* ========================================================================= */
+/* UV2H_EVENT_OCCURRED2_ALIAS */
+/* ========================================================================= */
+#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
+#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
+
+/* ========================================================================= */
+/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */
+/* ========================================================================= */
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+union uv2h_lb_bau_sb_activation_status_2_u {
+ unsigned long v;
+ struct uv2h_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error : 64; /* RW */
+ } s1;
+};
+
+/* ========================================================================= */
+/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */
+/* ========================================================================= */
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
+
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
+
+union uv1h_lb_target_physical_apic_id_mask_u {
+ unsigned long v;
+ struct uv1h_lb_target_physical_apic_id_mask_s {
+ unsigned long bit_enables : 32; /* RW */
+ unsigned long rsvd_32_63 : 32; /* */
+ } s1;
+};
+
+
#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f450b683dfcf..b511a011b7d0 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -91,6 +91,10 @@ static int __init early_get_pnodeid(void)
m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
uv_min_hub_revision_id = node_id.s.revision;
+ if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
+ uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+
+ uv_hub_info->hub_revision = uv_min_hub_revision_id;
pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
return pnode;
}
@@ -112,17 +116,25 @@ static void __init early_get_apic_pnode_shift(void)
*/
static void __init uv_set_apicid_hibit(void)
{
- union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
+ union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;
- apicid_mask.v = uv_early_read_mmr(UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK);
- uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
+ if (is_uv1_hub()) {
+ apicid_mask.v =
+ uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
+ uv_apicid_hibits =
+ apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
+ }
}
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
- int pnodeid;
+ int pnodeid, is_uv1, is_uv2;
- if (!strcmp(oem_id, "SGI")) {
+ is_uv1 = !strcmp(oem_id, "SGI");
+ is_uv2 = !strcmp(oem_id, "SGI2");
+ if (is_uv1 || is_uv2) {
+ uv_hub_info->hub_revision =
+ is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
pnodeid = early_get_pnodeid();
early_get_apic_pnode_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -484,12 +496,19 @@ static __init void map_mmr_high(int max_pnode)
static __init void map_mmioh_high(int max_pnode)
{
union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
- int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ int shift;
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
- if (mmioh.s.enable)
- map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+ if (is_uv1_hub() && mmioh.s1.enable) {
+ shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
+ max_pnode, map_uc);
+ }
+ if (is_uv2_hub() && mmioh.s2.enable) {
+ shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
max_pnode, map_uc);
+ }
}
static __init void map_low_mmrs(void)
@@ -736,13 +755,14 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr;
unsigned short pnode_mask, pnode_io_mask;
+ printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
map_low_mmrs();
m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
- n_io = mmioh.s.n_io;
+ n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
mmr_base =
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
~UV_MMR_ENABLE;
@@ -811,6 +831,8 @@ void __init uv_system_init(void)
*/
uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
+ uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
+
pnode = uv_apicid_to_pnode(apicid);
blade = boot_pnode_to_blade(pnode);
lcpu = uv_blade_info[blade].nr_possible_cpus;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8f5cabb3c5b0..b13ed393dfce 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -612,8 +612,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
#endif
- /* As a rule processors have APIC timer running in deep C states */
- if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
+ /*
+ * Family 0x12 and above processors have APIC timer
+ * running in deep C states.
+ */
+ if (c->x86 > 0x11)
set_cpu_cap(c, X86_FEATURE_ARAT);
/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c8b41623377f..53f02f5d8cce 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -477,13 +477,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
if (smp_num_siblings <= 1)
goto out;
- if (smp_num_siblings > nr_cpu_ids) {
- pr_warning("CPU: Unsupported number of siblings %d",
- smp_num_siblings);
- smp_num_siblings = 1;
- return;
- }
-
index_msb = get_count_order(smp_num_siblings);
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0ba15a6cc57e..c9a281f272fd 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -123,7 +123,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status; /* holds return value of text write */
static void *mod_code_ip; /* holds the IP to write to */
-static void *mod_code_newcode; /* holds the text to write to the IP */
+static const void *mod_code_newcode; /* holds the text to write to the IP */
static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);
@@ -225,7 +225,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
}
static int
-do_ftrace_mod_code(unsigned long ip, void *new_code)
+do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
@@ -266,8 +266,8 @@ static const unsigned char *ftrace_nop_replace(void)
}
static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code)
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ unsigned const char *new_code)
{
unsigned char replaced[MCOUNT_INSN_SIZE];
@@ -301,7 +301,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned char *new, *old;
+ unsigned const char *new, *old;
unsigned long ip = rec->ip;
old = ftrace_call_replace(ip, addr);
@@ -312,7 +312,7 @@ int ftrace_make_nop(struct module *mod,
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned char *new, *old;
+ unsigned const char *new, *old;
unsigned long ip = rec->ip;
old = ftrace_nop_replace();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a3e5948670c2..afaf38447ef5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -910,6 +910,13 @@ void __init setup_arch(char **cmdline_p)
memblock.current_limit = get_max_mapped();
memblock_x86_fill();
+ /*
+ * The EFI specification says that boot service code won't be called
+ * after ExitBootServices(). This is, in fact, a lie.
+ */
+ if (efi_enabled)
+ efi_reserve_boot_services();
+
/* preallocate 4k for mptable mpc */
early_reserve_e820_mpc_new();
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 32cbffb0c494..fbb0a045a1a2 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -345,3 +345,4 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg /* 345 */
+ .long sys_setns
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f7a2a054a3c0..2dbf6bf4c7e5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -823,16 +823,30 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}
-static noinline void
+static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
+ /*
+ * Pagefault was interrupted by SIGKILL. We have no reason to
+ * continue pagefault.
+ */
+ if (fatal_signal_pending(current)) {
+ if (!(fault & VM_FAULT_RETRY))
+ up_read(&current->mm->mmap_sem);
+ if (!(error_code & PF_USER))
+ no_context(regs, error_code, address);
+ return 1;
+ }
+ if (!(fault & VM_FAULT_ERROR))
+ return 0;
+
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address);
- return;
+ return 1;
}
out_of_memory(regs, error_code, address);
@@ -843,6 +857,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
else
BUG();
}
+ return 1;
}
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
@@ -1133,19 +1148,9 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, error_code, address, fault);
- return;
- }
-
- /*
- * Pagefault was interrupted by SIGKILL. We have no reason to
- * continue pagefault.
- */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
- if (!(error_code & PF_USER))
- no_context(regs, error_code, address);
- return;
+ if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ if (mm_fault_error(regs, error_code, address, fault))
+ return;
}
/*
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index c3b8e24f2b16..9fd8a567fe1e 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void)
wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
-static inline int eilvt_is_available(int offset)
+static inline int get_eilvt(int offset)
{
- /* check if we may assign a vector */
return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}
+static inline int put_eilvt(int offset)
+{
+ return !setup_APIC_eilvt(offset, 0, 0, 1);
+}
+
static inline int ibs_eilvt_valid(void)
{
int offset;
u64 val;
+ int valid = 0;
+
+ preempt_disable();
rdmsrl(MSR_AMD64_IBSCTL, val);
offset = val & IBSCTL_LVT_OFFSET_MASK;
@@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void)
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
- return 0;
+ goto out;
}
- if (!eilvt_is_available(offset)) {
+ if (!get_eilvt(offset)) {
pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
- return 0;
+ goto out;
}
- return 1;
+ valid = 1;
+out:
+ preempt_enable();
+
+ return valid;
}
static inline int get_ibs_offset(void)
@@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
static int force_ibs_eilvt_setup(void)
{
- int i;
+ int offset;
int ret;
- /* find the next free available EILVT entry */
- for (i = 1; i < 4; i++) {
- if (!eilvt_is_available(i))
- continue;
- ret = setup_ibs_ctl(i);
- if (ret)
- return ret;
- pr_err(FW_BUG "using offset %d for IBS interrupts\n", i);
- return 0;
+ /*
+ * find the next free available EILVT entry, skip offset 0,
+ * pin search to this cpu
+ */
+ preempt_disable();
+ for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
+ if (get_eilvt(offset))
+ break;
}
+ preempt_enable();
- printk(KERN_DEBUG "No EILVT entry available\n");
-
- return -EBUSY;
-}
-
-static int __init_ibs_nmi(void)
-{
- int ret;
-
- if (ibs_eilvt_valid())
- return 0;
+ if (offset == APIC_EILVT_NR_MAX) {
+ printk(KERN_DEBUG "No EILVT entry available\n");
+ return -EBUSY;
+ }
- ret = force_ibs_eilvt_setup();
+ ret = setup_ibs_ctl(offset);
if (ret)
- return ret;
+ goto out;
- if (!ibs_eilvt_valid())
- return -EFAULT;
+ if (!ibs_eilvt_valid()) {
+ ret = -EFAULT;
+ goto out;
+ }
+ pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
return 0;
+out:
+ preempt_disable();
+ put_eilvt(offset);
+ preempt_enable();
+ return ret;
}
/*
* check and reserve APIC extended interrupt LVT offset for IBS if
* available
- *
- * init_ibs() preforms implicitly cpu-local operations, so pin this
- * thread to its current CPU
*/
static void init_ibs(void)
{
- preempt_disable();
-
ibs_caps = get_ibs_caps();
+
if (!ibs_caps)
+ return;
+
+ if (ibs_eilvt_valid())
goto out;
- if (__init_ibs_nmi() < 0)
- ibs_caps = 0;
- else
- printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
+ if (!force_ibs_eilvt_setup())
+ goto out;
+
+ /* Failed to setup ibs */
+ ibs_caps = 0;
+ return;
out:
- preempt_enable();
+ printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
}
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index b30aa26a8df2..0d3a4fa34560 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -304,6 +304,40 @@ static void __init print_efi_memmap(void)
}
#endif /* EFI_DEBUG */
+void __init efi_reserve_boot_services(void)
+{
+ void *p;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ efi_memory_desc_t *md = p;
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+ memblock_x86_reserve_range(start, start + size, "EFI Boot");
+ }
+}
+
+static void __init efi_free_boot_services(void)
+{
+ void *p;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ efi_memory_desc_t *md = p;
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+ free_bootmem_late(start, size);
+ }
+}
+
void __init efi_init(void)
{
efi_config_table_t *config_tables;
@@ -536,7 +570,9 @@ void __init efi_enter_virtual_mode(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+ md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
continue;
size = md->num_pages << EFI_PAGE_SHIFT;
@@ -593,6 +629,13 @@ void __init efi_enter_virtual_mode(void)
}
/*
+ * Thankfully, it does seem that no runtime services other than
+ * SetVirtualAddressMap() will touch boot services code, so we can
+ * get rid of it all at this point
+ */
+ efi_free_boot_services();
+
+ /*
* Now that EFI is in virtual mode, update the function
* pointers in the runtime service table to the new virtual addresses.
*
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 2649426a7905..ac3aa54e2654 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -49,10 +49,11 @@ static void __init early_code_mapping_set_exec(int executable)
if (!(__supported_pte_mask & _PAGE_NX))
return;
- /* Make EFI runtime service code area executable */
+ /* Make EFI service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_CODE)
efi_set_executable(md, executable);
}
}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index c58e0ea39ef5..68e467f69fec 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1,7 +1,7 @@
/*
* SGI UltraViolet TLB flush routines.
*
- * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
+ * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
*
* This code is released under the GNU General Public License version 2 or
* later.
@@ -35,6 +35,7 @@ static int timeout_base_ns[] = {
5242880,
167772160
};
+
static int timeout_us;
static int nobau;
static int baudisabled;
@@ -42,20 +43,70 @@ static spinlock_t disable_lock;
static cycles_t congested_cycles;
/* tunables: */
-static int max_bau_concurrent = MAX_BAU_CONCURRENT;
-static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
-static int plugged_delay = PLUGGED_DELAY;
-static int plugsb4reset = PLUGSB4RESET;
-static int timeoutsb4reset = TIMEOUTSB4RESET;
-static int ipi_reset_limit = IPI_RESET_LIMIT;
-static int complete_threshold = COMPLETE_THRESHOLD;
-static int congested_response_us = CONGESTED_RESPONSE_US;
-static int congested_reps = CONGESTED_REPS;
-static int congested_period = CONGESTED_PERIOD;
+static int max_concurr = MAX_BAU_CONCURRENT;
+static int max_concurr_const = MAX_BAU_CONCURRENT;
+static int plugged_delay = PLUGGED_DELAY;
+static int plugsb4reset = PLUGSB4RESET;
+static int timeoutsb4reset = TIMEOUTSB4RESET;
+static int ipi_reset_limit = IPI_RESET_LIMIT;
+static int complete_threshold = COMPLETE_THRESHOLD;
+static int congested_respns_us = CONGESTED_RESPONSE_US;
+static int congested_reps = CONGESTED_REPS;
+static int congested_period = CONGESTED_PERIOD;
+
+static struct tunables tunables[] = {
+ {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
+ {&plugged_delay, PLUGGED_DELAY},
+ {&plugsb4reset, PLUGSB4RESET},
+ {&timeoutsb4reset, TIMEOUTSB4RESET},
+ {&ipi_reset_limit, IPI_RESET_LIMIT},
+ {&complete_threshold, COMPLETE_THRESHOLD},
+ {&congested_respns_us, CONGESTED_RESPONSE_US},
+ {&congested_reps, CONGESTED_REPS},
+ {&congested_period, CONGESTED_PERIOD}
+};
+
static struct dentry *tunables_dir;
static struct dentry *tunables_file;
-static int __init setup_nobau(char *arg)
+/* these correspond to the statistics printed by ptc_seq_show() */
+static char *stat_description[] = {
+ "sent: number of shootdown messages sent",
+ "stime: time spent sending messages",
+ "numuvhubs: number of hubs targeted with shootdown",
+ "numuvhubs16: number times 16 or more hubs targeted",
+ "numuvhubs8: number times 8 or more hubs targeted",
+ "numuvhubs4: number times 4 or more hubs targeted",
+ "numuvhubs2: number times 2 or more hubs targeted",
+ "numuvhubs1: number times 1 hub targeted",
+ "numcpus: number of cpus targeted with shootdown",
+ "dto: number of destination timeouts",
+ "retries: destination timeout retries sent",
+ "rok: : destination timeouts successfully retried",
+ "resetp: ipi-style resource resets for plugs",
+ "resett: ipi-style resource resets for timeouts",
+ "giveup: fall-backs to ipi-style shootdowns",
+ "sto: number of source timeouts",
+ "bz: number of stay-busy's",
+ "throt: number times spun in throttle",
+ "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
+ "recv: shootdown messages received",
+ "rtime: time spent processing messages",
+ "all: shootdown all-tlb messages",
+ "one: shootdown one-tlb messages",
+ "mult: interrupts that found multiple messages",
+ "none: interrupts that found no messages",
+ "retry: number of retry messages processed",
+ "canc: number messages canceled by retries",
+ "nocan: number retries that found nothing to cancel",
+ "reset: number of ipi-style reset requests processed",
+ "rcan: number messages canceled by reset requests",
+ "disable: number times use of the BAU was disabled",
+ "enable: number times use of the BAU was re-enabled"
+};
+
+static int __init
+setup_nobau(char *arg)
{
nobau = 1;
return 0;
@@ -63,7 +114,7 @@ static int __init setup_nobau(char *arg)
early_param("nobau", setup_nobau);
/* base pnode in this partition */
-static int uv_partition_base_pnode __read_mostly;
+static int uv_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;
@@ -109,60 +160,52 @@ static int __init uvhub_to_first_apicid(int uvhub)
* clear of the Timeout bit (as well) will free the resource. No reply will
* be sent (the hardware will only do one reply per message).
*/
-static inline void uv_reply_to_message(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
{
unsigned long dw;
- struct bau_payload_queue_entry *msg;
+ struct bau_pq_entry *msg;
msg = mdp->msg;
if (!msg->canceled) {
- dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
- msg->sw_ack_vector;
- uv_write_local_mmr(
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
+ dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
+ write_mmr_sw_ack(dw);
}
msg->replied_to = 1;
- msg->sw_ack_vector = 0;
+ msg->swack_vec = 0;
}
/*
* Process the receipt of a RETRY message
*/
-static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void bau_process_retry_msg(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
int i;
int cancel_count = 0;
- int slot2;
unsigned long msg_res;
unsigned long mmr = 0;
- struct bau_payload_queue_entry *msg;
- struct bau_payload_queue_entry *msg2;
- struct ptc_stats *stat;
+ struct bau_pq_entry *msg = mdp->msg;
+ struct bau_pq_entry *msg2;
+ struct ptc_stats *stat = bcp->statp;
- msg = mdp->msg;
- stat = bcp->statp;
stat->d_retries++;
/*
* cancel any message from msg+1 to the retry itself
*/
for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
- if (msg2 > mdp->va_queue_last)
- msg2 = mdp->va_queue_first;
+ if (msg2 > mdp->queue_last)
+ msg2 = mdp->queue_first;
if (msg2 == msg)
break;
- /* same conditions for cancellation as uv_do_reset */
+ /* same conditions for cancellation as do_reset */
if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
- (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
- msg->sw_ack_vector) == 0) &&
+ (msg2->swack_vec) && ((msg2->swack_vec &
+ msg->swack_vec) == 0) &&
(msg2->sending_cpu == msg->sending_cpu) &&
(msg2->msg_type != MSG_NOOP)) {
- slot2 = msg2 - mdp->va_queue_first;
- mmr = uv_read_local_mmr
- (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
- msg_res = msg2->sw_ack_vector;
+ mmr = read_mmr_sw_ack();
+ msg_res = msg2->swack_vec;
/*
* This is a message retry; clear the resources held
* by the previous message only if they timed out.
@@ -170,6 +213,7 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
* situation to report.
*/
if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
+ unsigned long mr;
/*
* is the resource timed out?
* make everyone ignore the cancelled message.
@@ -177,10 +221,8 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
msg2->canceled = 1;
stat->d_canceled++;
cancel_count++;
- uv_write_local_mmr(
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
- (msg_res << UV_SW_ACK_NPENDING) |
- msg_res);
+ mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
+ write_mmr_sw_ack(mr);
}
}
}
@@ -192,20 +234,19 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
* Do all the things a cpu should do for a TLB shootdown message.
* Other cpu's may come here at the same time for this message.
*/
-static void uv_bau_process_message(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void bau_process_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
- int msg_ack_count;
short socket_ack_count = 0;
- struct ptc_stats *stat;
- struct bau_payload_queue_entry *msg;
+ short *sp;
+ struct atomic_short *asp;
+ struct ptc_stats *stat = bcp->statp;
+ struct bau_pq_entry *msg = mdp->msg;
struct bau_control *smaster = bcp->socket_master;
/*
* This must be a normal message, or retry of a normal message
*/
- msg = mdp->msg;
- stat = bcp->statp;
if (msg->address == TLB_FLUSH_ALL) {
local_flush_tlb();
stat->d_alltlb++;
@@ -222,30 +263,32 @@ static void uv_bau_process_message(struct msg_desc *mdp,
* cpu number.
*/
if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
- uv_bau_process_retry_msg(mdp, bcp);
+ bau_process_retry_msg(mdp, bcp);
/*
- * This is a sw_ack message, so we have to reply to it.
+ * This is a swack message, so we have to reply to it.
* Count each responding cpu on the socket. This avoids
* pinging the count's cache line back and forth between
* the sockets.
*/
- socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
- &smaster->socket_acknowledge_count[mdp->msg_slot]);
+ sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
+ asp = (struct atomic_short *)sp;
+ socket_ack_count = atom_asr(1, asp);
if (socket_ack_count == bcp->cpus_in_socket) {
+ int msg_ack_count;
/*
* Both sockets dump their completed count total into
* the message's count.
*/
smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
- msg_ack_count = atomic_add_short_return(socket_ack_count,
- (struct atomic_short *)&msg->acknowledge_count);
+ asp = (struct atomic_short *)&msg->acknowledge_count;
+ msg_ack_count = atom_asr(socket_ack_count, asp);
if (msg_ack_count == bcp->cpus_in_uvhub) {
/*
* All cpus in uvhub saw it; reply
*/
- uv_reply_to_message(mdp, bcp);
+ reply_to_message(mdp, bcp);
}
}
@@ -268,62 +311,51 @@ static int uvhub_to_first_cpu(int uvhub)
* Last resort when we get a large number of destination timeouts is
* to clear resources held by a given cpu.
* Do this with IPI so that all messages in the BAU message queue
- * can be identified by their nonzero sw_ack_vector field.
+ * can be identified by their nonzero swack_vec field.
*
* This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
- * sw_ack resources.
+ * swack resources.
*/
-static void
-uv_do_reset(void *ptr)
+static void do_reset(void *ptr)
{
int i;
- int slot;
- int count = 0;
- unsigned long mmr;
- unsigned long msg_res;
- struct bau_control *bcp;
- struct reset_args *rap;
- struct bau_payload_queue_entry *msg;
- struct ptc_stats *stat;
+ struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
+ struct reset_args *rap = (struct reset_args *)ptr;
+ struct bau_pq_entry *msg;
+ struct ptc_stats *stat = bcp->statp;
- bcp = &per_cpu(bau_control, smp_processor_id());
- rap = (struct reset_args *)ptr;
- stat = bcp->statp;
stat->d_resets++;
-
/*
* We're looking for the given sender, and
- * will free its sw_ack resource.
+ * will free its swack resource.
* If all cpu's finally responded after the timeout, its
* message 'replied_to' was set.
*/
- for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
- /* uv_do_reset: same conditions for cancellation as
- uv_bau_process_retry_msg() */
+ for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
+ unsigned long msg_res;
+ /* do_reset: same conditions for cancellation as
+ bau_process_retry_msg() */
if ((msg->replied_to == 0) &&
(msg->canceled == 0) &&
(msg->sending_cpu == rap->sender) &&
- (msg->sw_ack_vector) &&
+ (msg->swack_vec) &&
(msg->msg_type != MSG_NOOP)) {
+ unsigned long mmr;
+ unsigned long mr;
/*
* make everyone else ignore this message
*/
msg->canceled = 1;
- slot = msg - bcp->va_queue_first;
- count++;
/*
* only reset the resource if it is still pending
*/
- mmr = uv_read_local_mmr
- (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
- msg_res = msg->sw_ack_vector;
+ mmr = read_mmr_sw_ack();
+ msg_res = msg->swack_vec;
+ mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
if (mmr & msg_res) {
stat->d_rcanceled++;
- uv_write_local_mmr(
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
- (msg_res << UV_SW_ACK_NPENDING) |
- msg_res);
+ write_mmr_sw_ack(mr);
}
}
}
@@ -334,39 +366,38 @@ uv_do_reset(void *ptr)
* Use IPI to get all target uvhubs to release resources held by
* a given sending cpu number.
*/
-static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
- int sender)
+static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender)
{
int uvhub;
- int cpu;
+ int maskbits;
cpumask_t mask;
struct reset_args reset_args;
reset_args.sender = sender;
-
cpus_clear(mask);
/* find a single cpu for each uvhub in this distribution mask */
- for (uvhub = 0;
- uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
- uvhub++) {
+ maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
+ for (uvhub = 0; uvhub < maskbits; uvhub++) {
+ int cpu;
if (!bau_uvhub_isset(uvhub, distribution))
continue;
/* find a cpu for this uvhub */
cpu = uvhub_to_first_cpu(uvhub);
cpu_set(cpu, mask);
}
- /* IPI all cpus; Preemption is already disabled */
- smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
+
+ /* IPI all cpus; preemption is already disabled */
+ smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
return;
}
-static inline unsigned long
-cycles_2_us(unsigned long long cyc)
+static inline unsigned long cycles_2_us(unsigned long long cyc)
{
unsigned long long ns;
unsigned long us;
- ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
- >> CYC2NS_SCALE_FACTOR;
+ int cpu = smp_processor_id();
+
+ ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
us = ns / 1000;
return us;
}
@@ -376,56 +407,56 @@ cycles_2_us(unsigned long long cyc)
* leaves uvhub_quiesce set so that no new broadcasts are started by
* bau_flush_send_and_wait()
*/
-static inline void
-quiesce_local_uvhub(struct bau_control *hmaster)
+static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
- atomic_add_short_return(1, (struct atomic_short *)
- &hmaster->uvhub_quiesce);
+ atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
/*
* mark this quiet-requestor as done
*/
-static inline void
-end_uvhub_quiesce(struct bau_control *hmaster)
+static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
- atomic_add_short_return(-1, (struct atomic_short *)
- &hmaster->uvhub_quiesce);
+ atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
+}
+
+static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
+{
+ unsigned long descriptor_status;
+
+ descriptor_status = uv_read_local_mmr(mmr_offset);
+ descriptor_status >>= right_shift;
+ descriptor_status &= UV_ACT_STATUS_MASK;
+ return descriptor_status;
}
/*
* Wait for completion of a broadcast software ack message
* return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
*/
-static int uv_wait_completion(struct bau_desc *bau_desc,
- unsigned long mmr_offset, int right_shift, int this_cpu,
- struct bau_control *bcp, struct bau_control *smaster, long try)
+static int uv1_wait_completion(struct bau_desc *bau_desc,
+ unsigned long mmr_offset, int right_shift,
+ struct bau_control *bcp, long try)
{
unsigned long descriptor_status;
- cycles_t ttime;
+ cycles_t ttm;
struct ptc_stats *stat = bcp->statp;
- struct bau_control *hmaster;
-
- hmaster = bcp->uvhub_master;
+ descriptor_status = uv1_read_status(mmr_offset, right_shift);
/* spin on the status MMR, waiting for it to go idle */
- while ((descriptor_status = (((unsigned long)
- uv_read_local_mmr(mmr_offset) >>
- right_shift) & UV_ACT_STATUS_MASK)) !=
- DESC_STATUS_IDLE) {
+ while ((descriptor_status != DS_IDLE)) {
/*
- * Our software ack messages may be blocked because there are
- * no swack resources available. As long as none of them
- * has timed out hardware will NACK our message and its
- * state will stay IDLE.
+ * Our software ack messages may be blocked because
+ * there are no swack resources available. As long
+ * as none of them has timed out hardware will NACK
+ * our message and its state will stay IDLE.
*/
- if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
+ if (descriptor_status == DS_SOURCE_TIMEOUT) {
stat->s_stimeout++;
return FLUSH_GIVEUP;
- } else if (descriptor_status ==
- DESC_STATUS_DESTINATION_TIMEOUT) {
+ } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
stat->s_dtimeout++;
- ttime = get_cycles();
+ ttm = get_cycles();
/*
* Our retries may be blocked by all destination
@@ -433,8 +464,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
* pending. In that case hardware returns the
* ERROR that looks like a destination timeout.
*/
- if (cycles_2_us(ttime - bcp->send_message) <
- timeout_us) {
+ if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
bcp->conseccompletes = 0;
return FLUSH_RETRY_PLUGGED;
}
@@ -447,80 +477,160 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
*/
cpu_relax();
}
+ descriptor_status = uv1_read_status(mmr_offset, right_shift);
}
bcp->conseccompletes++;
return FLUSH_COMPLETE;
}
-static inline cycles_t
-sec_2_cycles(unsigned long sec)
+/*
+ * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
+ */
+static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
{
- unsigned long ns;
- cycles_t cyc;
+ unsigned long descriptor_status;
+ unsigned long descriptor_status2;
- ns = sec * 1000000000;
- cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
- return cyc;
+ descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
+ descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
+ descriptor_status = (descriptor_status << 1) | descriptor_status2;
+ return descriptor_status;
+}
+
+static int uv2_wait_completion(struct bau_desc *bau_desc,
+ unsigned long mmr_offset, int right_shift,
+ struct bau_control *bcp, long try)
+{
+ unsigned long descriptor_stat;
+ cycles_t ttm;
+ int cpu = bcp->uvhub_cpu;
+ struct ptc_stats *stat = bcp->statp;
+
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+
+ /* spin on the status MMR, waiting for it to go idle */
+ while (descriptor_stat != UV2H_DESC_IDLE) {
+ /*
+ * Our software ack messages may be blocked because
+ * there are no swack resources available. As long
+ * as none of them has timed out hardware will NACK
+ * our message and its state will stay IDLE.
+ */
+ if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
+ (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
+ (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
+ stat->s_stimeout++;
+ return FLUSH_GIVEUP;
+ } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
+ stat->s_dtimeout++;
+ ttm = get_cycles();
+ /*
+ * Our retries may be blocked by all destination
+ * swack resources being consumed, and a timeout
+ * pending. In that case hardware returns the
+ * ERROR that looks like a destination timeout.
+ */
+ if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_PLUGGED;
+ }
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_TIMEOUT;
+ } else {
+ /*
+ * descriptor_stat is still BUSY
+ */
+ cpu_relax();
+ }
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+ }
+ bcp->conseccompletes++;
+ return FLUSH_COMPLETE;
}
/*
- * conditionally add 1 to *v, unless *v is >= u
- * return 0 if we cannot add 1 to *v because it is >= u
- * return 1 if we can add 1 to *v because it is < u
- * the add is atomic
- *
- * This is close to atomic_add_unless(), but this allows the 'u' value
- * to be lowered below the current 'v'. atomic_add_unless can only stop
- * on equal.
+ * There are 2 status registers; each an array[32] of 2 bits. Set up for
+ * which register to read and position in that register based on cpu in
+ * current hub.
*/
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+static int wait_completion(struct bau_desc *bau_desc,
+ struct bau_control *bcp, long try)
{
- spin_lock(lock);
- if (atomic_read(v) >= u) {
- spin_unlock(lock);
- return 0;
+ int right_shift;
+ unsigned long mmr_offset;
+ int cpu = bcp->uvhub_cpu;
+
+ if (cpu < UV_CPUS_PER_AS) {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+ right_shift = cpu * UV_ACT_STATUS_SIZE;
+ } else {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+ right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
}
- atomic_inc(v);
- spin_unlock(lock);
- return 1;
+
+ if (is_uv1_hub())
+ return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
+ bcp, try);
+ else
+ return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
+ bcp, try);
+}
+
+static inline cycles_t sec_2_cycles(unsigned long sec)
+{
+ unsigned long ns;
+ cycles_t cyc;
+
+ ns = sec * 1000000000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
}
/*
- * Our retries are blocked by all destination swack resources being
+ * Our retries are blocked by all destination sw ack resources being
* in use, and a timeout is pending. In that case hardware immediately
* returns the ERROR that looks like a destination timeout.
*/
-static void
-destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+static void destination_plugged(struct bau_desc *bau_desc,
+ struct bau_control *bcp,
struct bau_control *hmaster, struct ptc_stats *stat)
{
udelay(bcp->plugged_delay);
bcp->plugged_tries++;
+
if (bcp->plugged_tries >= bcp->plugsb4reset) {
bcp->plugged_tries = 0;
+
quiesce_local_uvhub(hmaster);
+
spin_lock(&hmaster->queue_lock);
- uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ reset_with_ipi(&bau_desc->distribution, bcp->cpu);
spin_unlock(&hmaster->queue_lock);
+
end_uvhub_quiesce(hmaster);
+
bcp->ipi_attempts++;
stat->s_resets_plug++;
}
}
-static void
-destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
- struct bau_control *hmaster, struct ptc_stats *stat)
+static void destination_timeout(struct bau_desc *bau_desc,
+ struct bau_control *bcp, struct bau_control *hmaster,
+ struct ptc_stats *stat)
{
- hmaster->max_bau_concurrent = 1;
+ hmaster->max_concurr = 1;
bcp->timeout_tries++;
if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
bcp->timeout_tries = 0;
+
quiesce_local_uvhub(hmaster);
+
spin_lock(&hmaster->queue_lock);
- uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ reset_with_ipi(&bau_desc->distribution, bcp->cpu);
spin_unlock(&hmaster->queue_lock);
+
end_uvhub_quiesce(hmaster);
+
bcp->ipi_attempts++;
stat->s_resets_timeout++;
}
@@ -530,34 +640,104 @@ destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
* Completions are taking a very long time due to a congested numalink
* network.
*/
-static void
-disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
+static void disable_for_congestion(struct bau_control *bcp,
+ struct ptc_stats *stat)
{
- int tcpu;
- struct bau_control *tbcp;
-
/* let only one cpu do this disabling */
spin_lock(&disable_lock);
+
if (!baudisabled && bcp->period_requests &&
((bcp->period_time / bcp->period_requests) > congested_cycles)) {
+ int tcpu;
+ struct bau_control *tbcp;
/* it becomes this cpu's job to turn on the use of the
BAU again */
baudisabled = 1;
bcp->set_bau_off = 1;
- bcp->set_bau_on_time = get_cycles() +
- sec_2_cycles(bcp->congested_period);
+ bcp->set_bau_on_time = get_cycles();
+ bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
stat->s_bau_disabled++;
for_each_present_cpu(tcpu) {
tbcp = &per_cpu(bau_control, tcpu);
- tbcp->baudisabled = 1;
+ tbcp->baudisabled = 1;
}
}
+
spin_unlock(&disable_lock);
}
-/**
- * uv_flush_send_and_wait
- *
+static void count_max_concurr(int stat, struct bau_control *bcp,
+ struct bau_control *hmaster)
+{
+ bcp->plugged_tries = 0;
+ bcp->timeout_tries = 0;
+ if (stat != FLUSH_COMPLETE)
+ return;
+ if (bcp->conseccompletes <= bcp->complete_threshold)
+ return;
+ if (hmaster->max_concurr >= hmaster->max_concurr_const)
+ return;
+ hmaster->max_concurr++;
+}
+
+static void record_send_stats(cycles_t time1, cycles_t time2,
+ struct bau_control *bcp, struct ptc_stats *stat,
+ int completion_status, int try)
+{
+ cycles_t elapsed;
+
+ if (time2 > time1) {
+ elapsed = time2 - time1;
+ stat->s_time += elapsed;
+
+ if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
+ bcp->period_requests++;
+ bcp->period_time += elapsed;
+ if ((elapsed > congested_cycles) &&
+ (bcp->period_requests > bcp->cong_reps))
+ disable_for_congestion(bcp, stat);
+ }
+ } else
+ stat->s_requestor--;
+
+ if (completion_status == FLUSH_COMPLETE && try > 1)
+ stat->s_retriesok++;
+ else if (completion_status == FLUSH_GIVEUP)
+ stat->s_giveup++;
+}
+
+/*
+ * Because of a uv1 hardware bug only a limited number of concurrent
+ * requests can be made.
+ */
+static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
+{
+ spinlock_t *lock = &hmaster->uvhub_lock;
+ atomic_t *v;
+
+ v = &hmaster->active_descriptor_count;
+ if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
+ stat->s_throttles++;
+ do {
+ cpu_relax();
+ } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
+ }
+}
+
+/*
+ * Handle the completion status of a message send.
+ */
+static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
+ struct bau_control *bcp, struct bau_control *hmaster,
+ struct ptc_stats *stat)
+{
+ if (completion_status == FLUSH_RETRY_PLUGGED)
+ destination_plugged(bau_desc, bcp, hmaster, stat);
+ else if (completion_status == FLUSH_RETRY_TIMEOUT)
+ destination_timeout(bau_desc, bcp, hmaster, stat);
+}
+
+/*
* Send a broadcast and wait for it to complete.
*
* The flush_mask contains the cpus the broadcast is to be sent to including
@@ -568,44 +748,23 @@ disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
* returned to the kernel.
*/
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
- struct cpumask *flush_mask, struct bau_control *bcp)
+ struct cpumask *flush_mask, struct bau_control *bcp)
{
- int right_shift;
- int completion_status = 0;
int seq_number = 0;
+ int completion_stat = 0;
long try = 0;
- int cpu = bcp->uvhub_cpu;
- int this_cpu = bcp->cpu;
- unsigned long mmr_offset;
unsigned long index;
cycles_t time1;
cycles_t time2;
- cycles_t elapsed;
struct ptc_stats *stat = bcp->statp;
- struct bau_control *smaster = bcp->socket_master;
struct bau_control *hmaster = bcp->uvhub_master;
- if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
- &hmaster->active_descriptor_count,
- hmaster->max_bau_concurrent)) {
- stat->s_throttles++;
- do {
- cpu_relax();
- } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
- &hmaster->active_descriptor_count,
- hmaster->max_bau_concurrent));
- }
+ if (is_uv1_hub())
+ uv1_throttle(hmaster, stat);
+
while (hmaster->uvhub_quiesce)
cpu_relax();
- if (cpu < UV_CPUS_PER_ACT_STATUS) {
- mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
- right_shift = cpu * UV_ACT_STATUS_SIZE;
- } else {
- mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
- right_shift =
- ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
- }
time1 = get_cycles();
do {
if (try == 0) {
@@ -615,64 +774,134 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
bau_desc->header.msg_type = MSG_RETRY;
stat->s_retry_messages++;
}
+
bau_desc->header.sequence = seq_number;
- index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
- bcp->uvhub_cpu;
+ index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
bcp->send_message = get_cycles();
- uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+
+ write_mmr_activation(index);
+
try++;
- completion_status = uv_wait_completion(bau_desc, mmr_offset,
- right_shift, this_cpu, bcp, smaster, try);
+ completion_stat = wait_completion(bau_desc, bcp, try);
+
+ handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
- if (completion_status == FLUSH_RETRY_PLUGGED) {
- destination_plugged(bau_desc, bcp, hmaster, stat);
- } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
- destination_timeout(bau_desc, bcp, hmaster, stat);
- }
if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
bcp->ipi_attempts = 0;
- completion_status = FLUSH_GIVEUP;
+ completion_stat = FLUSH_GIVEUP;
break;
}
cpu_relax();
- } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
- (completion_status == FLUSH_RETRY_TIMEOUT));
+ } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
+ (completion_stat == FLUSH_RETRY_TIMEOUT));
+
time2 = get_cycles();
- bcp->plugged_tries = 0;
- bcp->timeout_tries = 0;
- if ((completion_status == FLUSH_COMPLETE) &&
- (bcp->conseccompletes > bcp->complete_threshold) &&
- (hmaster->max_bau_concurrent <
- hmaster->max_bau_concurrent_constant))
- hmaster->max_bau_concurrent++;
+
+ count_max_concurr(completion_stat, bcp, hmaster);
+
while (hmaster->uvhub_quiesce)
cpu_relax();
+
atomic_dec(&hmaster->active_descriptor_count);
- if (time2 > time1) {
- elapsed = time2 - time1;
- stat->s_time += elapsed;
- if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
- bcp->period_requests++;
- bcp->period_time += elapsed;
- if ((elapsed > congested_cycles) &&
- (bcp->period_requests > bcp->congested_reps)) {
- disable_for_congestion(bcp, stat);
+
+ record_send_stats(time1, time2, bcp, stat, completion_stat, try);
+
+ if (completion_stat == FLUSH_GIVEUP)
+ return 1;
+ return 0;
+}
+
+/*
+ * The BAU is disabled. When the disabled time period has expired, the cpu
+ * that disabled it must re-enable it.
+ * Return 0 if it is re-enabled for all cpus.
+ */
+static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+{
+ int tcpu;
+ struct bau_control *tbcp;
+
+ if (bcp->set_bau_off) {
+ if (get_cycles() >= bcp->set_bau_on_time) {
+ stat->s_bau_reenabled++;
+ baudisabled = 0;
+ for_each_present_cpu(tcpu) {
+ tbcp = &per_cpu(bau_control, tcpu);
+ tbcp->baudisabled = 0;
+ tbcp->period_requests = 0;
+ tbcp->period_time = 0;
}
+ return 0;
}
+ }
+ return -1;
+}
+
+static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
+ int remotes, struct bau_desc *bau_desc)
+{
+ stat->s_requestor++;
+ stat->s_ntargcpu += remotes + locals;
+ stat->s_ntargremotes += remotes;
+ stat->s_ntarglocals += locals;
+
+ /* uvhub statistics */
+ hubs = bau_uvhub_weight(&bau_desc->distribution);
+ if (locals) {
+ stat->s_ntarglocaluvhub++;
+ stat->s_ntargremoteuvhub += (hubs - 1);
} else
- stat->s_requestor--;
- if (completion_status == FLUSH_COMPLETE && try > 1)
- stat->s_retriesok++;
- else if (completion_status == FLUSH_GIVEUP) {
- stat->s_giveup++;
- return 1;
+ stat->s_ntargremoteuvhub += hubs;
+
+ stat->s_ntarguvhub += hubs;
+
+ if (hubs >= 16)
+ stat->s_ntarguvhub16++;
+ else if (hubs >= 8)
+ stat->s_ntarguvhub8++;
+ else if (hubs >= 4)
+ stat->s_ntarguvhub4++;
+ else if (hubs >= 2)
+ stat->s_ntarguvhub2++;
+ else
+ stat->s_ntarguvhub1++;
+}
+
+/*
+ * Translate a cpu mask to the uvhub distribution mask in the BAU
+ * activation descriptor.
+ */
+static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
+ struct bau_desc *bau_desc, int *localsp, int *remotesp)
+{
+ int cpu;
+ int pnode;
+ int cnt = 0;
+ struct hub_and_pnode *hpp;
+
+ for_each_cpu(cpu, flush_mask) {
+ /*
+ * The distribution vector is a bit map of pnodes, relative
+ * to the partition base pnode (and the partition base nasid
+ * in the header).
+ * Translate cpu to pnode and hub using a local memory array.
+ */
+ hpp = &bcp->socket_master->thp[cpu];
+ pnode = hpp->pnode - bcp->partition_base_pnode;
+ bau_uvhub_set(pnode, &bau_desc->distribution);
+ cnt++;
+ if (hpp->uvhub == bcp->uvhub)
+ (*localsp)++;
+ else
+ (*remotesp)++;
}
+ if (!cnt)
+ return 1;
return 0;
}
-/**
- * uv_flush_tlb_others - globally purge translation cache of a virtual
- * address or all TLB's
+/*
+ * globally purge translation cache of a virtual address or all TLB's
* @cpumask: mask of all cpu's in which the address is to be removed
* @mm: mm_struct containing virtual address range
* @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
@@ -696,20 +925,16 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
* done. The returned pointer is valid till preemption is re-enabled.
*/
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long va, unsigned int cpu)
+ struct mm_struct *mm, unsigned long va,
+ unsigned int cpu)
{
int locals = 0;
int remotes = 0;
int hubs = 0;
- int tcpu;
- int tpnode;
struct bau_desc *bau_desc;
struct cpumask *flush_mask;
struct ptc_stats *stat;
struct bau_control *bcp;
- struct bau_control *tbcp;
- struct hub_and_pnode *hpp;
/* kernel was booted 'nobau' */
if (nobau)
@@ -720,20 +945,8 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
/* bau was disabled due to slow response */
if (bcp->baudisabled) {
- /* the cpu that disabled it must re-enable it */
- if (bcp->set_bau_off) {
- if (get_cycles() >= bcp->set_bau_on_time) {
- stat->s_bau_reenabled++;
- baudisabled = 0;
- for_each_present_cpu(tcpu) {
- tbcp = &per_cpu(bau_control, tcpu);
- tbcp->baudisabled = 0;
- tbcp->period_requests = 0;
- tbcp->period_time = 0;
- }
- }
- }
- return cpumask;
+ if (check_enable(bcp, stat))
+ return cpumask;
}
/*
@@ -744,59 +957,20 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
/* don't actually do a shootdown of the local cpu */
cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+
if (cpu_isset(cpu, *cpumask))
stat->s_ntargself++;
bau_desc = bcp->descriptor_base;
- bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
+ bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
-
- for_each_cpu(tcpu, flush_mask) {
- /*
- * The distribution vector is a bit map of pnodes, relative
- * to the partition base pnode (and the partition base nasid
- * in the header).
- * Translate cpu to pnode and hub using an array stored
- * in local memory.
- */
- hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
- tpnode = hpp->pnode - bcp->partition_base_pnode;
- bau_uvhub_set(tpnode, &bau_desc->distribution);
- if (hpp->uvhub == bcp->uvhub)
- locals++;
- else
- remotes++;
- }
- if ((locals + remotes) == 0)
+ if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
return NULL;
- stat->s_requestor++;
- stat->s_ntargcpu += remotes + locals;
- stat->s_ntargremotes += remotes;
- stat->s_ntarglocals += locals;
- remotes = bau_uvhub_weight(&bau_desc->distribution);
- /* uvhub statistics */
- hubs = bau_uvhub_weight(&bau_desc->distribution);
- if (locals) {
- stat->s_ntarglocaluvhub++;
- stat->s_ntargremoteuvhub += (hubs - 1);
- } else
- stat->s_ntargremoteuvhub += hubs;
- stat->s_ntarguvhub += hubs;
- if (hubs >= 16)
- stat->s_ntarguvhub16++;
- else if (hubs >= 8)
- stat->s_ntarguvhub8++;
- else if (hubs >= 4)
- stat->s_ntarguvhub4++;
- else if (hubs >= 2)
- stat->s_ntarguvhub2++;
- else
- stat->s_ntarguvhub1++;
+ record_send_statistics(stat, locals, hubs, remotes, bau_desc);
bau_desc->payload.address = va;
bau_desc->payload.sending_cpu = cpu;
-
/*
* uv_flush_send_and_wait returns 0 if all cpu's were messaged,
* or 1 if it gave up and the original cpumask should be returned.
@@ -825,26 +999,31 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
{
int count = 0;
cycles_t time_start;
- struct bau_payload_queue_entry *msg;
+ struct bau_pq_entry *msg;
struct bau_control *bcp;
struct ptc_stats *stat;
struct msg_desc msgdesc;
time_start = get_cycles();
+
bcp = &per_cpu(bau_control, smp_processor_id());
stat = bcp->statp;
- msgdesc.va_queue_first = bcp->va_queue_first;
- msgdesc.va_queue_last = bcp->va_queue_last;
+
+ msgdesc.queue_first = bcp->queue_first;
+ msgdesc.queue_last = bcp->queue_last;
+
msg = bcp->bau_msg_head;
- while (msg->sw_ack_vector) {
+ while (msg->swack_vec) {
count++;
- msgdesc.msg_slot = msg - msgdesc.va_queue_first;
- msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
+
+ msgdesc.msg_slot = msg - msgdesc.queue_first;
+ msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
msgdesc.msg = msg;
- uv_bau_process_message(&msgdesc, bcp);
+ bau_process_message(&msgdesc, bcp);
+
msg++;
- if (msg > msgdesc.va_queue_last)
- msg = msgdesc.va_queue_first;
+ if (msg > msgdesc.queue_last)
+ msg = msgdesc.queue_first;
bcp->bau_msg_head = msg;
}
stat->d_time += (get_cycles() - time_start);
@@ -852,18 +1031,17 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
stat->d_nomsg++;
else if (count > 1)
stat->d_multmsg++;
+
ack_APIC_irq();
}
/*
- * uv_enable_timeouts
- *
- * Each target uvhub (i.e. a uvhub that has no cpu's) needs to have
+ * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
* shootdown message timeouts enabled. The timeout does not cause
* an interrupt, but causes an error message to be returned to
* the sender.
*/
-static void __init uv_enable_timeouts(void)
+static void __init enable_timeouts(void)
{
int uvhub;
int nuvhubs;
@@ -877,47 +1055,44 @@ static void __init uv_enable_timeouts(void)
continue;
pnode = uv_blade_to_pnode(uvhub);
- mmr_image =
- uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+ mmr_image = read_mmr_misc_control(pnode);
/*
* Set the timeout period and then lock it in, in three
* steps; captures and locks in the period.
*
* To program the period, the SOFT_ACK_MODE must be off.
*/
- mmr_image &= ~((unsigned long)1 <<
- UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
- uv_write_global_mmr64
- (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ mmr_image &= ~(1L << SOFTACK_MSHIFT);
+ write_mmr_misc_control(pnode, mmr_image);
/*
* Set the 4-bit period.
*/
- mmr_image &= ~((unsigned long)0xf <<
- UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
- mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
- UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
- uv_write_global_mmr64
- (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
+ mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
+ write_mmr_misc_control(pnode, mmr_image);
/*
+ * UV1:
* Subsequent reversals of the timebase bit (3) cause an
* immediate timeout of one or all INTD resources as
* indicated in bits 2:0 (7 causes all of them to timeout).
*/
- mmr_image |= ((unsigned long)1 <<
- UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
- uv_write_global_mmr64
- (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ mmr_image |= (1L << SOFTACK_MSHIFT);
+ if (is_uv2_hub()) {
+ mmr_image |= (1L << UV2_LEG_SHFT);
+ mmr_image |= (1L << UV2_EXT_SHFT);
+ }
+ write_mmr_misc_control(pnode, mmr_image);
}
}
-static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
+static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
{
if (*offset < num_possible_cpus())
return offset;
return NULL;
}
-static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
+static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
(*offset)++;
if (*offset < num_possible_cpus())
@@ -925,12 +1100,11 @@ static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
return NULL;
}
-static void uv_ptc_seq_stop(struct seq_file *file, void *data)
+static void ptc_seq_stop(struct seq_file *file, void *data)
{
}
-static inline unsigned long long
-microsec_2_cycles(unsigned long microsec)
+static inline unsigned long long usec_2_cycles(unsigned long microsec)
{
unsigned long ns;
unsigned long long cyc;
@@ -941,29 +1115,27 @@ microsec_2_cycles(unsigned long microsec)
}
/*
- * Display the statistics thru /proc.
+ * Display the statistics thru /proc/sgi_uv/ptc_statistics
* 'data' points to the cpu number
+ * Note: see the descriptions in stat_description[].
*/
-static int uv_ptc_seq_show(struct seq_file *file, void *data)
+static int ptc_seq_show(struct seq_file *file, void *data)
{
struct ptc_stats *stat;
int cpu;
cpu = *(loff_t *)data;
-
if (!cpu) {
seq_printf(file,
"# cpu sent stime self locals remotes ncpus localhub ");
seq_printf(file,
"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
seq_printf(file,
- "numuvhubs4 numuvhubs2 numuvhubs1 dto ");
- seq_printf(file,
- "retries rok resetp resett giveup sto bz throt ");
+ "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
seq_printf(file,
- "sw_ack recv rtime all ");
+ "resetp resett giveup sto bz throt swack recv rtime ");
seq_printf(file,
- "one mult none retry canc nocan reset rcan ");
+ "all one mult none retry canc nocan reset rcan ");
seq_printf(file,
"disable enable\n");
}
@@ -990,8 +1162,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
/* destination side statistics */
seq_printf(file,
"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
- uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
+ read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
stat->d_requestee, cycles_2_us(stat->d_time),
stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
stat->d_nomsg, stat->d_retries, stat->d_canceled,
@@ -1000,7 +1171,6 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "%ld %ld\n",
stat->s_bau_disabled, stat->s_bau_reenabled);
}
-
return 0;
}
@@ -1008,18 +1178,18 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
* Display the tunables thru debugfs
*/
static ssize_t tunables_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
char *buf;
int ret;
buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
- "max_bau_concurrent plugged_delay plugsb4reset",
+ "max_concur plugged_delay plugsb4reset",
"timeoutsb4reset ipi_reset_limit complete_threshold",
"congested_response_us congested_reps congested_period",
- max_bau_concurrent, plugged_delay, plugsb4reset,
+ max_concurr, plugged_delay, plugsb4reset,
timeoutsb4reset, ipi_reset_limit, complete_threshold,
- congested_response_us, congested_reps, congested_period);
+ congested_respns_us, congested_reps, congested_period);
if (!buf)
return -ENOMEM;
@@ -1030,13 +1200,16 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
}
/*
- * -1: resetf the statistics
+ * handle a write to /proc/sgi_uv/ptc_statistics
+ * -1: reset the statistics
* 0: display meaning of the statistics
*/
-static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
+static ssize_t ptc_proc_write(struct file *file, const char __user *user,
+ size_t count, loff_t *data)
{
int cpu;
+ int i;
+ int elements;
long input_arg;
char optstr[64];
struct ptc_stats *stat;
@@ -1046,79 +1219,18 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
if (copy_from_user(optstr, user, count))
return -EFAULT;
optstr[count - 1] = '\0';
+
if (strict_strtol(optstr, 10, &input_arg) < 0) {
printk(KERN_DEBUG "%s is invalid\n", optstr);
return -EINVAL;
}
if (input_arg == 0) {
+ elements = sizeof(stat_description)/sizeof(*stat_description);
printk(KERN_DEBUG "# cpu: cpu number\n");
printk(KERN_DEBUG "Sender statistics:\n");
- printk(KERN_DEBUG
- "sent: number of shootdown messages sent\n");
- printk(KERN_DEBUG
- "stime: time spent sending messages\n");
- printk(KERN_DEBUG
- "numuvhubs: number of hubs targeted with shootdown\n");
- printk(KERN_DEBUG
- "numuvhubs16: number times 16 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs8: number times 8 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs4: number times 4 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs2: number times 2 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs1: number times 1 hub targeted\n");
- printk(KERN_DEBUG
- "numcpus: number of cpus targeted with shootdown\n");
- printk(KERN_DEBUG
- "dto: number of destination timeouts\n");
- printk(KERN_DEBUG
- "retries: destination timeout retries sent\n");
- printk(KERN_DEBUG
- "rok: : destination timeouts successfully retried\n");
- printk(KERN_DEBUG
- "resetp: ipi-style resource resets for plugs\n");
- printk(KERN_DEBUG
- "resett: ipi-style resource resets for timeouts\n");
- printk(KERN_DEBUG
- "giveup: fall-backs to ipi-style shootdowns\n");
- printk(KERN_DEBUG
- "sto: number of source timeouts\n");
- printk(KERN_DEBUG
- "bz: number of stay-busy's\n");
- printk(KERN_DEBUG
- "throt: number times spun in throttle\n");
- printk(KERN_DEBUG "Destination side statistics:\n");
- printk(KERN_DEBUG
- "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
- printk(KERN_DEBUG
- "recv: shootdown messages received\n");
- printk(KERN_DEBUG
- "rtime: time spent processing messages\n");
- printk(KERN_DEBUG
- "all: shootdown all-tlb messages\n");
- printk(KERN_DEBUG
- "one: shootdown one-tlb messages\n");
- printk(KERN_DEBUG
- "mult: interrupts that found multiple messages\n");
- printk(KERN_DEBUG
- "none: interrupts that found no messages\n");
- printk(KERN_DEBUG
- "retry: number of retry messages processed\n");
- printk(KERN_DEBUG
- "canc: number messages canceled by retries\n");
- printk(KERN_DEBUG
- "nocan: number retries that found nothing to cancel\n");
- printk(KERN_DEBUG
- "reset: number of ipi-style reset requests processed\n");
- printk(KERN_DEBUG
- "rcan: number messages canceled by reset requests\n");
- printk(KERN_DEBUG
- "disable: number times use of the BAU was disabled\n");
- printk(KERN_DEBUG
- "enable: number times use of the BAU was re-enabled\n");
+ for (i = 0; i < elements; i++)
+ printk(KERN_DEBUG "%s\n", stat_description[i]);
} else if (input_arg == -1) {
for_each_present_cpu(cpu) {
stat = &per_cpu(ptcstats, cpu);
@@ -1145,27 +1257,18 @@ static int local_atoi(const char *name)
}
/*
- * set the tunables
- * 0 values reset them to defaults
+ * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
+ * Zero values reset them to defaults.
*/
-static ssize_t tunables_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
+static int parse_tunables_write(struct bau_control *bcp, char *instr,
+ int count)
{
- int cpu;
- int cnt = 0;
- int val;
char *p;
char *q;
- char instr[64];
- struct bau_control *bcp;
-
- if (count == 0 || count > sizeof(instr)-1)
- return -EINVAL;
- if (copy_from_user(instr, user, count))
- return -EFAULT;
+ int cnt = 0;
+ int val;
+ int e = sizeof(tunables) / sizeof(*tunables);
- instr[count] = '\0';
- /* count the fields */
p = instr + strspn(instr, WHITESPACE);
q = p;
for (; *p; p = q + strspn(q, WHITESPACE)) {
@@ -1174,8 +1277,8 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
if (q == p)
break;
}
- if (cnt != 9) {
- printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
+ if (cnt != e) {
+ printk(KERN_INFO "bau tunable error: should be %d values\n", e);
return -EINVAL;
}
@@ -1187,97 +1290,80 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
switch (cnt) {
case 0:
if (val == 0) {
- max_bau_concurrent = MAX_BAU_CONCURRENT;
- max_bau_concurrent_constant =
- MAX_BAU_CONCURRENT;
+ max_concurr = MAX_BAU_CONCURRENT;
+ max_concurr_const = MAX_BAU_CONCURRENT;
continue;
}
- bcp = &per_cpu(bau_control, smp_processor_id());
if (val < 1 || val > bcp->cpus_in_uvhub) {
printk(KERN_DEBUG
"Error: BAU max concurrent %d is invalid\n",
val);
return -EINVAL;
}
- max_bau_concurrent = val;
- max_bau_concurrent_constant = val;
- continue;
- case 1:
- if (val == 0)
- plugged_delay = PLUGGED_DELAY;
- else
- plugged_delay = val;
- continue;
- case 2:
- if (val == 0)
- plugsb4reset = PLUGSB4RESET;
- else
- plugsb4reset = val;
- continue;
- case 3:
- if (val == 0)
- timeoutsb4reset = TIMEOUTSB4RESET;
- else
- timeoutsb4reset = val;
- continue;
- case 4:
- if (val == 0)
- ipi_reset_limit = IPI_RESET_LIMIT;
- else
- ipi_reset_limit = val;
- continue;
- case 5:
- if (val == 0)
- complete_threshold = COMPLETE_THRESHOLD;
- else
- complete_threshold = val;
- continue;
- case 6:
- if (val == 0)
- congested_response_us = CONGESTED_RESPONSE_US;
- else
- congested_response_us = val;
- continue;
- case 7:
- if (val == 0)
- congested_reps = CONGESTED_REPS;
- else
- congested_reps = val;
+ max_concurr = val;
+ max_concurr_const = val;
continue;
- case 8:
+ default:
if (val == 0)
- congested_period = CONGESTED_PERIOD;
+ *tunables[cnt].tunp = tunables[cnt].deflt;
else
- congested_period = val;
+ *tunables[cnt].tunp = val;
continue;
}
if (q == p)
break;
}
+ return 0;
+}
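
The parse loop above walks the buffer with strspn/strcspn, counting whitespace-separated fields and rejecting the write unless exactly as many values as there are tunables were supplied. A minimal standalone sketch of the same field-counting walk (WS here stands in for the driver's WHITESPACE define):

#include <stdio.h>
#include <string.h>

#define WS " \t\n"	/* stand-in for the driver's WHITESPACE define */

/* Count whitespace-separated fields, using the same strspn/strcspn walk as above. */
static int count_fields(const char *s)
{
	int cnt = 0;
	const char *p = s + strspn(s, WS);
	const char *q = p;

	for (; *p; p = q + strspn(q, WS)) {
		q = p + strcspn(p, WS);
		cnt++;
		if (q == p)
			break;
	}
	return cnt;
}

int main(void)
{
	/* nine values, as the bau_tunables file expects; prints 9 */
	printf("%d\n", count_fields("2 10 3 2 16 40 4 20 400"));
	return 0;
}
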
+
+/*
+ * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
+ */
+static ssize_t tunables_write(struct file *file, const char __user *user,
+ size_t count, loff_t *data)
+{
+ int cpu;
+ int ret;
+ char instr[100];
+ struct bau_control *bcp;
+
+ if (count == 0 || count > sizeof(instr)-1)
+ return -EINVAL;
+ if (copy_from_user(instr, user, count))
+ return -EFAULT;
+
+ instr[count] = '\0';
+
+ bcp = &per_cpu(bau_control, smp_processor_id());
+
+ ret = parse_tunables_write(bcp, instr, count);
+ if (ret)
+ return ret;
+
for_each_present_cpu(cpu) {
bcp = &per_cpu(bau_control, cpu);
- bcp->max_bau_concurrent = max_bau_concurrent;
- bcp->max_bau_concurrent_constant = max_bau_concurrent;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->congested_response_us = congested_response_us;
- bcp->congested_reps = congested_reps;
- bcp->congested_period = congested_period;
+ bcp->max_concurr = max_concurr;
+ bcp->max_concurr_const = max_concurr;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->cong_response_us = congested_respns_us;
+ bcp->cong_reps = congested_reps;
+ bcp->cong_period = congested_period;
}
return count;
}
static const struct seq_operations uv_ptc_seq_ops = {
- .start = uv_ptc_seq_start,
- .next = uv_ptc_seq_next,
- .stop = uv_ptc_seq_stop,
- .show = uv_ptc_seq_show
+ .start = ptc_seq_start,
+ .next = ptc_seq_next,
+ .stop = ptc_seq_stop,
+ .show = ptc_seq_show
};
-static int uv_ptc_proc_open(struct inode *inode, struct file *file)
+static int ptc_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &uv_ptc_seq_ops);
}
@@ -1288,9 +1374,9 @@ static int tunables_open(struct inode *inode, struct file *file)
}
static const struct file_operations proc_uv_ptc_operations = {
- .open = uv_ptc_proc_open,
+ .open = ptc_proc_open,
.read = seq_read,
- .write = uv_ptc_proc_write,
+ .write = ptc_proc_write,
.llseek = seq_lseek,
.release = seq_release,
};
@@ -1324,7 +1410,7 @@ static int __init uv_ptc_init(void)
return -EINVAL;
}
tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
- tunables_dir, NULL, &tunables_fops);
+ tunables_dir, NULL, &tunables_fops);
if (!tunables_file) {
printk(KERN_ERR "unable to create debugfs file %s\n",
UV_BAU_TUNABLES_FILE);
@@ -1336,24 +1422,24 @@ static int __init uv_ptc_init(void)
/*
* Initialize the sending side's sending buffers.
*/
-static void
-uv_activation_descriptor_init(int node, int pnode, int base_pnode)
+static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
int i;
int cpu;
unsigned long pa;
unsigned long m;
unsigned long n;
+ size_t dsize;
struct bau_desc *bau_desc;
struct bau_desc *bd2;
struct bau_control *bcp;
/*
- * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
- * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
+ * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
+ * per cpu; and one per cpu on the uvhub (ADP_SZ)
*/
- bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
- * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+ dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
+ bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
BUG_ON(!bau_desc);
pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1361,27 +1447,25 @@ uv_activation_descriptor_init(int node, int pnode, int base_pnode)
m = pa & uv_mmask;
/* the 14-bit pnode */
- uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
- (n << UV_DESC_BASE_PNODE_SHIFT | m));
+ write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
/*
- * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+ * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
* cpu even though we only use the first one; one descriptor can
* describe a broadcast to 256 uv hubs.
*/
- for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
- i++, bd2++) {
+ for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
memset(bd2, 0, sizeof(struct bau_desc));
- bd2->header.sw_ack_flag = 1;
+ bd2->header.swack_flag = 1;
/*
* The base_dest_nasid set in the message header is the nasid
* of the first uvhub in the partition. The bit map will
* indicate destination pnode numbers relative to that base.
* They may not be consecutive if nasid striding is being used.
*/
- bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
- bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
- bd2->header.command = UV_NET_ENDPOINT_INTD;
- bd2->header.int_both = 1;
+ bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+ bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
+ bd2->header.command = UV_NET_ENDPOINT_INTD;
+ bd2->header.int_both = 1;
/*
* all others need to be set to zero:
* fairness chaining multilevel count replied_to
@@ -1401,57 +1485,55 @@ uv_activation_descriptor_init(int node, int pnode, int base_pnode)
* - node is first node (kernel memory notion) on the uvhub
* - pnode is the uvhub's physical identifier
*/
-static void
-uv_payload_queue_init(int node, int pnode)
+static void pq_init(int node, int pnode)
{
- int pn;
int cpu;
+ size_t plsize;
char *cp;
- unsigned long pa;
- struct bau_payload_queue_entry *pqp;
- struct bau_payload_queue_entry *pqp_malloc;
+ void *vp;
+ unsigned long pn;
+ unsigned long first;
+ unsigned long pn_first;
+ unsigned long last;
+ struct bau_pq_entry *pqp;
struct bau_control *bcp;
- pqp = kmalloc_node((DEST_Q_SIZE + 1)
- * sizeof(struct bau_payload_queue_entry),
- GFP_KERNEL, node);
+ plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
+ vp = kmalloc_node(plsize, GFP_KERNEL, node);
+ pqp = (struct bau_pq_entry *)vp;
BUG_ON(!pqp);
- pqp_malloc = pqp;
cp = (char *)pqp + 31;
- pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
+ pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
for_each_present_cpu(cpu) {
if (pnode != uv_cpu_to_pnode(cpu))
continue;
/* for every cpu on this pnode: */
bcp = &per_cpu(bau_control, cpu);
- bcp->va_queue_first = pqp;
- bcp->bau_msg_head = pqp;
- bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
+ bcp->queue_first = pqp;
+ bcp->bau_msg_head = pqp;
+ bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
}
/*
* need the pnode of where the memory was really allocated
*/
- pa = uv_gpa(pqp);
- pn = pa >> uv_nshift;
- uv_write_global_mmr64(pnode,
- UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
- ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
- uv_physnodeaddr(pqp));
- uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
- uv_physnodeaddr(pqp));
- uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
- (unsigned long)
- uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
+ pn = uv_gpa(pqp) >> uv_nshift;
+ first = uv_physnodeaddr(pqp);
+ pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
+ last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
+ write_mmr_payload_first(pnode, pn_first);
+ write_mmr_payload_tail(pnode, first);
+ write_mmr_payload_last(pnode, last);
+
/* in effect, all msg_type's are set to MSG_NOOP */
- memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
+ memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}
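
The "+ 31" followed by the shift-right/shift-left pair in pq_init rounds the payload-queue pointer up to the next 32-byte boundary. A standalone sketch of the same rounding, using a hypothetical align32() helper (not a kernel function):

#include <stdio.h>
#include <stdint.h>

/* Round ptr up to the next 32-byte boundary: add 31, then clear the low 5 bits. */
static uintptr_t align32(uintptr_t ptr)
{
	return ((ptr + 31) >> 5) << 5;
}

int main(void)
{
	/* e.g. an allocation at 0x1005 rounds up to 0x1020 */
	printf("%#lx -> %#lx\n", 0x1005UL, (unsigned long)align32(0x1005));
	return 0;
}
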
/*
* Initialization of each UV hub's structures
*/
-static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
+static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
int node;
int pnode;
@@ -1459,24 +1541,24 @@ static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
node = uvhub_to_first_node(uvhub);
pnode = uv_blade_to_pnode(uvhub);
- uv_activation_descriptor_init(node, pnode, base_pnode);
- uv_payload_queue_init(node, pnode);
+
+ activation_descriptor_init(node, pnode, base_pnode);
+
+ pq_init(node, pnode);
/*
* The below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS.
*/
apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
- ((apicid << 32) | vector));
+ write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
/*
* We will set BAU_MISC_CONTROL with a timeout period.
* But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
- * So the destination timeout period has be be calculated from them.
+ * So the destination timeout period has to be calculated from them.
*/
-static int
-calculate_destination_timeout(void)
+static int calculate_destination_timeout(void)
{
unsigned long mmr_image;
int mult1;
@@ -1486,73 +1568,92 @@ calculate_destination_timeout(void)
int ret;
unsigned long ts_ns;
- mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
- index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
- mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
- mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
- base = timeout_base_ns[index];
- ts_ns = base * mult1 * mult2;
- ret = ts_ns / 1000;
+ if (is_uv1_hub()) {
+ mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
+ mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
+ mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
+ mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
+ base = timeout_base_ns[index];
+ ts_ns = base * mult1 * mult2;
+ ret = ts_ns / 1000;
+ } else {
+ /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
+ mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+ mult1 = 80;
+ else
+ mult1 = 10;
+ base = mmr_image & UV2_ACK_MASK;
+ ret = mult1 * base;
+ }
return ret;
}
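
On the non-UV1 path above, the destination timeout is just the small base field from UVH_AGING_PRESCALE_SEL multiplied by either 10 or 80 microseconds, depending on the ACK-units bit. A standalone sketch of that arithmetic, assuming the mask/shift extraction has already been done (the real macros are omitted here):

#include <stdio.h>

/* UV2-style destination timeout: the units bit selects 10us or 80us per count. */
static int uv2_timeout_us(unsigned long base_field, int units_bit_set)
{
	int mult = units_bit_set ? 80 : 10;
	return mult * (int)base_field;
}

int main(void)
{
	/* e.g. a base field of 3 with the 80us units bit set gives 240us */
	printf("%d us\n", uv2_timeout_us(3, 1));
	return 0;
}
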
+static void __init init_per_cpu_tunables(void)
+{
+ int cpu;
+ struct bau_control *bcp;
+
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->baudisabled = 0;
+ bcp->statp = &per_cpu(ptcstats, cpu);
+ /* time interval to catch a hardware stay-busy bug */
+ bcp->timeout_interval = usec_2_cycles(2*timeout_us);
+ bcp->max_concurr = max_concurr;
+ bcp->max_concurr_const = max_concurr;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->cong_response_us = congested_respns_us;
+ bcp->cong_reps = congested_reps;
+ bcp->cong_period = congested_period;
+ }
+}
+
/*
- * initialize the bau_control structure for each cpu
+ * Scan all cpus to collect blade and socket summaries.
*/
-static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
+static int __init get_cpu_topology(int base_pnode,
+ struct uvhub_desc *uvhub_descs,
+ unsigned char *uvhub_mask)
{
- int i;
int cpu;
- int tcpu;
int pnode;
int uvhub;
- int have_hmaster;
- short socket = 0;
- unsigned short socket_mask;
- unsigned char *uvhub_mask;
+ int socket;
struct bau_control *bcp;
struct uvhub_desc *bdp;
struct socket_desc *sdp;
- struct bau_control *hmaster = NULL;
- struct bau_control *smaster = NULL;
- struct socket_desc {
- short num_cpus;
- short cpu_number[MAX_CPUS_PER_SOCKET];
- };
- struct uvhub_desc {
- unsigned short socket_mask;
- short num_cpus;
- short uvhub;
- short pnode;
- struct socket_desc socket[2];
- };
- struct uvhub_desc *uvhub_descs;
-
- timeout_us = calculate_destination_timeout();
- uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
- memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
- uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
for_each_present_cpu(cpu) {
bcp = &per_cpu(bau_control, cpu);
+
memset(bcp, 0, sizeof(struct bau_control));
+
pnode = uv_cpu_hub_info(cpu)->pnode;
- if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
+ if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
printk(KERN_EMERG
"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
- cpu, pnode, base_part_pnode,
- UV_DISTRIBUTION_SIZE);
+ cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
return 1;
}
+
bcp->osnode = cpu_to_node(cpu);
- bcp->partition_base_pnode = uv_partition_base_pnode;
+ bcp->partition_base_pnode = base_pnode;
+
uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
bdp = &uvhub_descs[uvhub];
+
bdp->num_cpus++;
bdp->uvhub = uvhub;
bdp->pnode = pnode;
+
/* kludge: 'assuming' one node per socket, and assuming that
disabling a socket just leaves a gap in node numbers */
socket = bcp->osnode & 1;
@@ -1561,84 +1662,129 @@ static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
sdp->cpu_number[sdp->num_cpus] = cpu;
sdp->num_cpus++;
if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
- printk(KERN_EMERG "%d cpus per socket invalid\n", sdp->num_cpus);
+ printk(KERN_EMERG "%d cpus per socket invalid\n",
+ sdp->num_cpus);
return 1;
}
}
+ return 0;
+}
+
+/*
+ * Each socket is to get a local array of pnodes/hubs.
+ */
+static void make_per_cpu_thp(struct bau_control *smaster)
+{
+ int cpu;
+ size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
+
+ smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
+ memset(smaster->thp, 0, hpsz);
+ for_each_present_cpu(cpu) {
+ smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
+ smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+ }
+}
+
+/*
+ * Initialize all the per_cpu information for the cpus on a given socket,
+ * given what has been gathered into the socket_desc struct.
+ * Report the chosen hub and socket masters back to the caller.
+ */
+static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
+ struct bau_control **smasterp,
+ struct bau_control **hmasterp)
+{
+ int i;
+ int cpu;
+ struct bau_control *bcp;
+
+ for (i = 0; i < sdp->num_cpus; i++) {
+ cpu = sdp->cpu_number[i];
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->cpu = cpu;
+ if (i == 0) {
+ *smasterp = bcp;
+ if (!(*hmasterp))
+ *hmasterp = bcp;
+ }
+ bcp->cpus_in_uvhub = bdp->num_cpus;
+ bcp->cpus_in_socket = sdp->num_cpus;
+ bcp->socket_master = *smasterp;
+ bcp->uvhub = bdp->uvhub;
+ bcp->uvhub_master = *hmasterp;
+ bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+ if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
+ printk(KERN_EMERG "%d cpus per uvhub invalid\n",
+ bcp->uvhub_cpu);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Summarize the blade and socket topology into the per_cpu structures.
+ */
+static int __init summarize_uvhub_sockets(int nuvhubs,
+ struct uvhub_desc *uvhub_descs,
+ unsigned char *uvhub_mask)
+{
+ int socket;
+ int uvhub;
+ unsigned short socket_mask;
+
for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+ struct uvhub_desc *bdp;
+ struct bau_control *smaster = NULL;
+ struct bau_control *hmaster = NULL;
+
if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
continue;
- have_hmaster = 0;
+
bdp = &uvhub_descs[uvhub];
socket_mask = bdp->socket_mask;
socket = 0;
while (socket_mask) {
- if (!(socket_mask & 1))
- goto nextsocket;
- sdp = &bdp->socket[socket];
- for (i = 0; i < sdp->num_cpus; i++) {
- cpu = sdp->cpu_number[i];
- bcp = &per_cpu(bau_control, cpu);
- bcp->cpu = cpu;
- if (i == 0) {
- smaster = bcp;
- if (!have_hmaster) {
- have_hmaster++;
- hmaster = bcp;
- }
- }
- bcp->cpus_in_uvhub = bdp->num_cpus;
- bcp->cpus_in_socket = sdp->num_cpus;
- bcp->socket_master = smaster;
- bcp->uvhub = bdp->uvhub;
- bcp->uvhub_master = hmaster;
- bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
- blade_processor_id;
- if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
- printk(KERN_EMERG
- "%d cpus per uvhub invalid\n",
- bcp->uvhub_cpu);
+ struct socket_desc *sdp;
+ if ((socket_mask & 1)) {
+ sdp = &bdp->socket[socket];
+ if (scan_sock(sdp, bdp, &smaster, &hmaster))
return 1;
- }
}
-nextsocket:
socket++;
socket_mask = (socket_mask >> 1);
- /* each socket gets a local array of pnodes/hubs */
- bcp = smaster;
- bcp->target_hub_and_pnode = kmalloc_node(
- sizeof(struct hub_and_pnode) *
- num_possible_cpus(), GFP_KERNEL, bcp->osnode);
- memset(bcp->target_hub_and_pnode, 0,
- sizeof(struct hub_and_pnode) *
- num_possible_cpus());
- for_each_present_cpu(tcpu) {
- bcp->target_hub_and_pnode[tcpu].pnode =
- uv_cpu_hub_info(tcpu)->pnode;
- bcp->target_hub_and_pnode[tcpu].uvhub =
- uv_cpu_hub_info(tcpu)->numa_blade_id;
- }
+ make_per_cpu_thp(smaster);
}
}
+ return 0;
+}
+
+/*
+ * initialize the bau_control structure for each cpu
+ */
+static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
+{
+ unsigned char *uvhub_mask;
+ void *vp;
+ struct uvhub_desc *uvhub_descs;
+
+ timeout_us = calculate_destination_timeout();
+
+ vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ uvhub_descs = (struct uvhub_desc *)vp;
+ memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+ uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
+
+ if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
+ return 1;
+
+ if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
+ return 1;
+
kfree(uvhub_descs);
kfree(uvhub_mask);
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->baudisabled = 0;
- bcp->statp = &per_cpu(ptcstats, cpu);
- /* time interval to catch a hardware stay-busy bug */
- bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
- bcp->max_bau_concurrent = max_bau_concurrent;
- bcp->max_bau_concurrent_constant = max_bau_concurrent;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->congested_response_us = congested_response_us;
- bcp->congested_reps = congested_reps;
- bcp->congested_period = congested_period;
- }
+ init_per_cpu_tunables();
return 0;
}
@@ -1651,8 +1797,9 @@ static int __init uv_bau_init(void)
int pnode;
int nuvhubs;
int cur_cpu;
+ int cpus;
int vector;
- unsigned long mmr;
+ cpumask_var_t *mask;
if (!is_uv_system())
return 0;
@@ -1660,24 +1807,25 @@ static int __init uv_bau_init(void)
if (nobau)
return 0;
- for_each_possible_cpu(cur_cpu)
- zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
- GFP_KERNEL, cpu_to_node(cur_cpu));
+ for_each_possible_cpu(cur_cpu) {
+ mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
+ zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
+ }
uv_nshift = uv_hub_info->m_val;
uv_mmask = (1UL << uv_hub_info->m_val) - 1;
nuvhubs = uv_num_possible_blades();
spin_lock_init(&disable_lock);
- congested_cycles = microsec_2_cycles(congested_response_us);
+ congested_cycles = usec_2_cycles(congested_respns_us);
- uv_partition_base_pnode = 0x7fffffff;
+ uv_base_pnode = 0x7fffffff;
for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- if (uv_blade_nr_possible_cpus(uvhub) &&
- (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
- uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+ cpus = uv_blade_nr_possible_cpus(uvhub);
+ if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
+ uv_base_pnode = uv_blade_to_pnode(uvhub);
}
- if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
+ if (init_per_cpu(nuvhubs, uv_base_pnode)) {
nobau = 1;
return 0;
}
@@ -1685,21 +1833,21 @@ static int __init uv_bau_init(void)
vector = UV_BAU_MESSAGE;
for_each_possible_blade(uvhub)
if (uv_blade_nr_possible_cpus(uvhub))
- uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
+ init_uvhub(uvhub, vector, uv_base_pnode);
- uv_enable_timeouts();
+ enable_timeouts();
alloc_intr_gate(vector, uv_bau_message_intr1);
for_each_possible_blade(uvhub) {
if (uv_blade_nr_possible_cpus(uvhub)) {
+ unsigned long val;
+ unsigned long mmr;
pnode = uv_blade_to_pnode(uvhub);
/* INIT the bau */
- uv_write_global_mmr64(pnode,
- UVH_LB_BAU_SB_ACTIVATION_CONTROL,
- ((unsigned long)1 << 63));
+ val = 1L << 63;
+ write_gmmr_activation(pnode, val);
mmr = 1; /* should be 1 to broadcast to both sockets */
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
- mmr);
+ write_mmr_data_broadcast(pnode, mmr);
}
}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 0eb90184515f..9f29a01ee1b3 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -99,8 +99,12 @@ static void uv_rtc_send_IPI(int cpu)
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
- return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
- UVH_EVENT_OCCURRED0_RTC1_MASK;
+ if (is_uv1_hub())
+ return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+ UV1H_EVENT_OCCURRED0_RTC1_MASK;
+ else
+ return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
+ UV2H_EVENT_OCCURRED2_RTC_1_MASK;
}
/* Setup interrupt and return non-zero if early expiration occurred. */
@@ -114,8 +118,12 @@ static int uv_setup_intr(int cpu, u64 expires)
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
- uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
- UVH_EVENT_OCCURRED0_RTC1_MASK);
+ if (is_uv1_hub())
+ uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+ UV1H_EVENT_OCCURRED0_RTC1_MASK);
+ else
+ uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
+ UV2H_EVENT_OCCURRED2_RTC_1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 528042c2951e..a6f934f37f1a 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -683,8 +683,10 @@ __SYSCALL(305, sys_ni_syscall, 0)
__SYSCALL(306, sys_eventfd, 1)
#define __NR_recvmmsg 307
__SYSCALL(307, sys_recvmmsg, 5)
+#define __NR_setns 308
+__SYSCALL(308, sys_setns, 2)
-#define __NR_syscall_count 308
+#define __NR_syscall_count 309
/*
* sysxtensa syscall handler
diff --git a/block/blk-core.c b/block/blk-core.c
index c8303e9d919d..d2f8f4049abd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -345,6 +345,7 @@ void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
+EXPORT_SYMBOL(blk_put_queue);
/*
* Note: If a driver supplied the queue lock, it should not zap that lock
@@ -566,6 +567,7 @@ int blk_get_queue(struct request_queue *q)
return 1;
}
+EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
@@ -1130,7 +1132,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
struct request *req, struct bio *bio)
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
- sector_t sector;
if (!ll_front_merge_fn(q, req, bio))
return false;
@@ -1140,8 +1141,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
- sector = bio->bi_sector;
-
bio->bi_next = req->bio;
req->bio = bio;
diff --git a/block/genhd.c b/block/genhd.c
index 2dd988723d73..95822ae25cfe 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1728,7 +1728,7 @@ static void disk_add_events(struct gendisk *disk)
{
struct disk_events *ev;
- if (!disk->fops->check_events || !(disk->events | disk->async_events))
+ if (!disk->fops->check_events)
return;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
diff --git a/drivers/Makefile b/drivers/Makefile
index 6b17f5864340..09f3232bcdcd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI) += sfi/
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
obj-$(CONFIG_ARM_AMBA) += amba/
+# Many drivers will want to use DMA so this has to be made available
+# really early.
+obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7025593a58c8..d74926e0939e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -603,6 +603,10 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_out;
+ /* Hard-coded primecell ID instead of plug-n-play */
+ if (dev->periphid != 0)
+ goto skip_probe;
+
/*
* Dynamically calculate the size of the resource
* and use this for iomap
@@ -643,6 +647,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_release;
+ skip_probe:
ret = device_add(&dev->dev);
if (ret)
goto err_release;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index b7f51e4594f8..dba1c32e1ddf 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -35,10 +35,6 @@
*/
struct brd_device {
int brd_number;
- int brd_refcnt;
- loff_t brd_offset;
- loff_t brd_sizelimit;
- unsigned brd_blocksize;
struct request_queue *brd_queue;
struct gendisk *brd_disk;
@@ -440,11 +436,11 @@ static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
-module_param(rd_nr, int, 0);
+module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
-module_param(rd_size, int, 0);
+module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
-module_param(max_part, int, 0);
+module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
@@ -552,7 +548,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
struct kobject *kobj;
mutex_lock(&brd_devices_mutex);
- brd = brd_init_one(dev & MINORMASK);
+ brd = brd_init_one(MINOR(dev) >> part_shift);
kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
mutex_unlock(&brd_devices_mutex);
@@ -575,25 +571,39 @@ static int __init brd_init(void)
*
* (1) if rd_nr is specified, create that many upfront, and this
* also becomes a hard limit.
- * (2) if rd_nr is not specified, create 1 rd device on module
- * load, user can further extend brd device by create dev node
- * themselves and have kernel automatically instantiate actual
- * device on-demand.
+ * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
+ * (default 16) rd devices on module load; the user can further
+ * extend brd devices by creating dev nodes themselves and have
+ * the kernel automatically instantiate the actual device on demand.
*/
part_shift = 0;
- if (max_part > 0)
+ if (max_part > 0) {
part_shift = fls(max_part);
+ /*
+ * Adjust max_part according to part_shift as it is exported
+ * to user space so that the user can decide the correct minor number
+ * if [s]he wants to create more devices.
+ *
+ * Note that -1 is required because partition 0 is reserved
+ * for the whole disk.
+ */
+ max_part = (1UL << part_shift) - 1;
+ }
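
The adjustment above rounds the requested number of partitions up to a power of two worth of minor numbers and keeps minor 0 for the whole disk, so rd_nr devices later span rd_nr << part_shift minors. A standalone sketch of that arithmetic, with a simple stand-in for the kernel's fls():

#include <stdio.h>

/* find-last-set: 1-based index of the highest set bit, 0 for 0 -- a stand-in for fls() */
static int fls_stub(unsigned int x)
{
	int i = 0;

	while (x) {
		i++;
		x >>= 1;
	}
	return i;
}

int main(void)
{
	unsigned int max_part = 15;
	int part_shift = 0;

	if (max_part > 0) {
		part_shift = fls_stub(max_part);	/* 15 -> 4 */
		max_part = (1U << part_shift) - 1;	/* 16 minors per disk, 15 usable partitions */
	}
	/* prints: part_shift=4 minors_per_disk=16 max_part=15 */
	printf("part_shift=%d minors_per_disk=%u max_part=%u\n",
	       part_shift, 1U << part_shift, max_part);
	return 0;
}
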
+
+ if ((1UL << part_shift) > DISK_MAX_PARTS)
+ return -EINVAL;
+
if (rd_nr > 1UL << (MINORBITS - part_shift))
return -EINVAL;
if (rd_nr) {
nr = rd_nr;
- range = rd_nr;
+ range = rd_nr << part_shift;
} else {
nr = CONFIG_BLK_DEV_RAM_COUNT;
- range = 1UL << (MINORBITS - part_shift);
+ range = 1UL << MINORBITS;
}
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
@@ -632,7 +642,7 @@ static void __exit brd_exit(void)
unsigned long range;
struct brd_device *brd, *next;
- range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);
+ range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c59a672a3de0..76c8da78212b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1540,9 +1540,9 @@ static const struct block_device_operations lo_fops = {
* And now the modules code and kernel interface.
*/
static int max_loop;
-module_param(max_loop, int, 0);
+module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
-module_param(max_part, int, 0);
+module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1688,9 +1688,20 @@ static int __init loop_init(void)
*/
part_shift = 0;
- if (max_part > 0)
+ if (max_part > 0) {
part_shift = fls(max_part);
+ /*
+ * Adjust max_part according to part_shift as it is exported
+ * to user space so that the user can decide the correct minor number
+ * if [s]he wants to create more devices.
+ *
+ * Note that -1 is required because partition 0 is reserved
+ * for the whole disk.
+ */
+ max_part = (1UL << part_shift) - 1;
+ }
+
if ((1UL << part_shift) > DISK_MAX_PARTS)
return -EINVAL;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a572600e44eb..25cf327cd1cb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,16 +200,18 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
+ tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
- Output Hub) which is for IVI(In-Vehicle Infotainment) use.
- ML7213 is companion chip for Intel Atom E6xx series.
- ML7213 is completely compatible for Intel EG20T PCH.
+ This driver can also be used for the OKI SEMICONDUCTOR IOHs (Input/
+ Output Hubs) ML7213 and ML7223.
+ The ML7213 IOH is for IVI (In-Vehicle Infotainment) use and the ML7223
+ IOH is for MP (Media Phone) use.
+ ML7213/ML7223 are companion chips for the Intel Atom E6xx series.
+ ML7213/ML7223 are fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
new file mode 100644
index 000000000000..a4af8589330c
--- /dev/null
+++ b/drivers/dma/TODO
@@ -0,0 +1,14 @@
+TODO for slave dma
+
+1. Move remaining drivers to use new slave interface
+2. Remove old slave pointer mechanism
+3. Make issue_pending start the transaction in the drivers below:
+ - mpc512x_dma
+ - imx-dma
+ - imx-sdma
+ - mxs-dma.c
+ - dw_dmac
+ - intel_mid_dma
+ - ste_dma40
+4. Check other subsystems for dma drivers and merge/move to dmaengine
+5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 235f53bf494e..36144f88d718 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -37,8 +37,8 @@
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA (0)
-#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
- |ATC_DIF(1))
+#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
+ |ATC_DIF(AT_DMA_MEM_IF))
/*
* Initial number of descriptors to allocate for each channel. This could
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
}
/**
+ * atc_desc_chain - build chain adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+ struct at_desc *desc)
+{
+ if (!(*first)) {
+ *first = desc;
+ } else {
+ /* inform the HW lli about chaining */
+ (*prev)->lli.dscr = desc->txd.phys;
+ /* insert the link descriptor to the LD ring */
+ list_add_tail(&desc->desc_node,
+ &(*first)->tx_list);
+ }
+ *prev = desc;
+}
+
+/**
* atc_assign_cookie - compute and assign new cookie
* @atchan: channel we work on
* @desc: descriptor to assign cookie for
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
atchan->completed_cookie = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
- callback(param);
+ /* for cyclic transfers,
+ * no need to replay callback function while stopping */
+ if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+ }
dma_run_dependencies(txd);
}
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
atc_chain_complete(atchan, bad_desc);
}
+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_desc *first = atc_first_active(atchan);
+ struct dma_async_tx_descriptor *txd = &first->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ dev_vdbg(chan2dev(&atchan->chan_common),
+ "new cyclic period llp 0x%08x\n",
+ channel_readl(atchan, DSCR));
+
+ if (callback)
+ callback(param);
+}
/*-- IRQ & Tasklet ---------------------------------------------------*/
@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
- /* Channel cannot be enabled here */
- if (atc_chan_is_enabled(atchan)) {
- dev_err(chan2dev(&atchan->chan_common),
- "BUG: channel enabled in tasklet\n");
- return;
- }
-
spin_lock(&atchan->lock);
- if (test_and_clear_bit(0, &atchan->error_status))
+ if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
+ else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_handle_cyclic(atchan);
else
atc_advance_work(atchan);
@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
for (i = 0; i < atdma->dma_common.chancnt; i++) {
atchan = &atdma->chan[i];
- if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+ if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
if (pending & AT_DMA_ERR(i)) {
/* Disable channel on AHB error */
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHDR,
+ AT_DMA_RES(i) | atchan->mask);
/* Give information to tasklet */
- set_bit(0, &atchan->error_status);
+ set_bit(ATC_IS_ERROR, &atchan->status);
}
tasklet_schedule(&atchan->tasklet);
ret = IRQ_HANDLED;
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
ctrla = ATC_DEFAULT_CTRLA;
- ctrlb = ATC_DEFAULT_CTRLB
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
| ATC_FC_MEM2MEM;
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->txd.cookie = 0;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
}
/* First descriptor of the chain embedds additional information */
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct scatterlist *sg;
size_t total_len = 0;
- dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+ dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
+ sg_len,
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
flags);
@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = atslave->reg_width;
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
- ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+ ctrlb = ATC_IEN;
switch (direction) {
case DMA_TO_DEVICE:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
- | ATC_FC_MEM2PER;
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
reg = atslave->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> mem_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
- | ATC_FC_PER2MEM;
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
reg = atslave->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> reg_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -759,41 +777,211 @@ err_desc_get:
return NULL;
}
+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ if (period_len > (ATC_BTSIZE_MAX << reg_width))
+ goto err_out;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ return -EINVAL;
+}
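
In the checks above, reg_width is the log2 of the transfer width in bytes, so ((1 << reg_width) - 1) is the alignment mask and (ATC_BTSIZE_MAX << reg_width) the largest period one descriptor can carry. A standalone illustration of those checks; the maximum buffer-transfer size is taken as 0xffff here purely for the example:

#include <stdio.h>

#define EX_BTSIZE_MAX 0xffffUL	/* assumed max buffer-transfer size, illustration only */

/* Return 0 if a period of period_len bytes at buf_addr is usable for reg_width (log2 bytes). */
static int cyclic_check(unsigned int reg_width, unsigned long buf_addr,
			unsigned long period_len)
{
	unsigned long mask = (1UL << reg_width) - 1;

	if (period_len > (EX_BTSIZE_MAX << reg_width))
		return -1;	/* period too long for one descriptor */
	if (period_len & mask)
		return -1;	/* period not a multiple of the transfer width */
	if (buf_addr & mask)
		return -1;	/* buffer not aligned to the transfer width */
	return 0;
}

int main(void)
{
	/* 32-bit transfers: reg_width = 2, so period and buffer must be 4-byte aligned; prints "0 -1" */
	printf("%d %d\n", cyclic_check(2, 0x1000, 4096), cyclic_check(2, 0x1002, 4096));
	return 0;
}
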
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+ unsigned int period_index, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ u32 ctrla;
+ unsigned int reg_width = atslave->reg_width;
+
+ /* prepare common CTRLA value */
+ ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+ | ATC_DST_WIDTH(reg_width)
+ | ATC_SRC_WIDTH(reg_width)
+ | period_len >> reg_width;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.saddr = buf_addr + (period_len * period_index);
+ desc->lli.daddr = atslave->tx_reg;
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
+ | ATC_SRC_ADDR_MODE_INCR
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF)
+ | ATC_DIF(AT_DMA_PER_IF);
+ break;
+
+ case DMA_FROM_DEVICE:
+ desc->lli.saddr = atslave->rx_reg;
+ desc->lli.daddr = buf_addr + (period_len * period_index);
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
+ | ATC_SRC_ADDR_MODE_FIXED
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF)
+ | ATC_DIF(AT_DMA_MEM_IF);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_slave *atslave = chan->private;
+ struct at_desc *first = NULL;
+ struct at_desc *prev = NULL;
+ unsigned long was_cyclic;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+ direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ buf_addr,
+ periods, buf_len, period_len);
+
+ if (unlikely(!atslave || !buf_len || !period_len)) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+ return NULL;
+ }
+
+ was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+ return NULL;
+ }
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer */
+ if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+ period_len, direction))
+ goto err_out;
+
+ /* build cyclic linked list */
+ for (i = 0; i < periods; i++) {
+ struct at_desc *desc;
+
+ desc = atc_desc_get(atchan);
+ if (!desc)
+ goto err_desc_get;
+
+ if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+ period_len, direction))
+ goto err_desc_get;
+
+ atc_desc_chain(&first, &prev, desc);
+ }
+
+ /* let's make a cyclic list */
+ prev->lli.dscr = first->txd.phys;
+
+ /* First descriptor of the chain embeds additional information */
+ first->txd.cookie = -EBUSY;
+ first->len = buf_len;
+
+ return &first->txd;
+
+err_desc_get:
+ dev_err(chan2dev(chan), "not enough descriptors available\n");
+ atc_desc_put(atchan, first);
+err_out:
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+ return NULL;
+}
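
A client of the new hook would obtain a slave channel, make its struct at_dma_slave reachable through chan->private, and call device_prep_dma_cyclic directly (no generic dmaengine wrapper for cyclic transfers is assumed here). A rough, uncompiled kernel-side sketch under those assumptions; my_start_cyclic() and my_period_done() are placeholder names, not part of this patch:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <mach/at_hdmac.h>	/* struct at_dma_slave (platform-specific header path) */

/* Rough sketch only: submit a cyclic TX of 8 periods of 1 KiB each. */
static int my_start_cyclic(struct dma_chan *chan, struct at_dma_slave *slave,
			   dma_addr_t buf, dma_async_tx_callback my_period_done,
			   void *arg)
{
	struct dma_async_tx_descriptor *desc;

	chan->private = slave;			/* at_hdmac reads its slave config from here */
	desc = chan->device->device_prep_dma_cyclic(chan, buf,
						    8 * 1024,	/* buf_len    */
						    1024,	/* period_len */
						    DMA_TO_DEVICE);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_period_done;	/* runs at the end of every period */
	desc->callback_param = arg;
	desc->tx_submit(desc);			/* cyclic transfers start here; issue_pending is a no-op */
	return 0;
}
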
+
+
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
- struct at_desc *desc, *_desc;
+ int chan_id = atchan->chan_common.chan_id;
+
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&atchan->lock);
+ if (cmd == DMA_PAUSE) {
+ spin_lock_bh(&atchan->lock);
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+ set_bit(ATC_IS_PAUSED, &atchan->status);
- /* confirm that this channel is disabled */
- while (dma_readl(atdma, CHSR) & atchan->mask)
- cpu_relax();
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_RESUME) {
+ if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+ return 0;
- /* active_list entries will end up before queued entries */
- list_splice_init(&atchan->queue, &list);
- list_splice_init(&atchan->active_list, &list);
+ spin_lock_bh(&atchan->lock);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ struct at_desc *desc, *_desc;
+ /*
+ * This is only called when something went wrong elsewhere, so
+ * we don't really care about the data. Just disable the
+ * channel. We still have to poll the channel enable bit due
+ * to AHB/HSB limitations.
+ */
+ spin_lock_bh(&atchan->lock);
+
+ /* disabling channel: must also remove suspend state */
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+ /* confirm that this channel is disabled */
+ while (dma_readl(atdma, CHSR) & atchan->mask)
+ cpu_relax();
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&atchan->queue, &list);
+ list_splice_init(&atchan->active_list, &list);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ atc_chain_complete(atchan, desc);
+
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
+ /* if channel dedicated to cyclic operations, free it */
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+ spin_unlock_bh(&atchan->lock);
+ } else {
+ return -ENXIO;
+ }
return 0;
}
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
- cookie, last_complete ? last_complete : 0,
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ atc_first_active(atchan)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (test_bit(ATC_IS_PAUSED, &atchan->status))
+ ret = DMA_PAUSED;
+
+ dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+ ret, cookie, last_complete ? last_complete : 0,
last_used ? last_used : 0);
return ret;
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "issue_pending\n");
+ /* Not needed for cyclic transfers */
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ return;
+
spin_lock_bh(&atchan->lock);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
}
list_splice_init(&atchan->free_list, &list);
atchan->descs_allocated = 0;
+ atchan->status = 0;
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+ if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+ atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+ dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control;
- }
dma_writel(atdma, EN, AT_DMA_ENABLE);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 495457e3dc4b..087dbf1dd39c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -103,6 +103,10 @@
/* Bitfields in CTRLB */
#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
+ /* Specify AHB interfaces */
+#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */
+#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */
+
#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
@@ -181,12 +185,23 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
/*-- Channels --------------------------------------------------------*/
/**
+ * atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+ ATC_IS_ERROR = 0,
+ ATC_IS_PAUSED = 1,
+ ATC_IS_CYCLIC = 24,
+};
+
+/**
* struct at_dma_chan - internal representation of an Atmel HDMAC channel
* @chan_common: common dmaengine channel object members
* @device: parent device
* @ch_regs: memory mapped register base
* @mask: channel index in a mask
- * @error_status: transmit error status information from irq handler
+ * @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
* @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -201,7 +216,7 @@ struct at_dma_chan {
struct at_dma *device;
void __iomem *ch_regs;
u8 mask;
- unsigned long error_status;
+ unsigned long status;
struct tasklet_struct tasklet;
spinlock_t lock;
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on)
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
u32 ebci;
- /* enable interrupts on buffer chain completion & error */
- ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
+ /* enable interrupts on buffer transfer completion & error */
+ ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
| AT_DMA_ERR(atchan->chan_common.chan_id);
if (on)
dma_writel(atdma, EBCIER, ebci);
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
*/
static void set_desc_eol(struct at_desc *desc)
{
- desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+ u32 ctrlb = desc->lli.ctrlb;
+
+ ctrlb &= ~ATC_IEN;
+ ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+
+ desc->lli.ctrlb = ctrlb;
desc->lli.dscr = 0;
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f48e54006518..af8c0b5ed70f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
-arch_initcall(coh901318_init);
+subsys_initcall(coh901318_init);
void __exit coh901318_exit(void)
{
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2a2e2fa00e91..4d180ca9a1d8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@
* AVR32 systems.)
*
* Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *ret = NULL;
unsigned int i = 0;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
if (async_tx_test_ack(&desc->txd)) {
list_del(&desc->desc_node);
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
*/
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
+ unsigned long flags;
+
if (desc) {
struct dw_desc *child;
dwc_sync_desc_for_cpu(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/*----------------------------------------------------------------------*/
static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+ bool callback_required)
{
- dma_async_tx_callback callback;
- void *param;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
+ unsigned long flags;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+ spin_lock_irqsave(&dwc->lock, flags);
dwc->completed = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
+ if (callback_required) {
+ callback = txd->callback;
+ param = txd->callback_param;
+ }
dwc_sync_desc_for_cpu(dwc, desc);
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ if (callback_required && callback)
callback(param);
}
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: XFER bit set, but channel not idle!\n");
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *child;
u32 status_xfer;
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
/*
* Clear block interrupt flag before scanning so that we don't
* miss any, and read LLP before RAW_XFER to ensure it is
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
dwc_complete_all(dw, dwc);
return;
}
- if (list_empty(&dwc->active_list))
+ if (list_empty(&dwc->active_list)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
- if (desc->lli.llp == llp)
+ /* check the first descriptor's address */
+ if (desc->txd.phys == llp) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* check the first descriptor's llp */
+ if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
list_for_each_entry(child, &desc->tx_list, desc_node)
- if (child->lli.llp == llp)
+ if (child->lli.llp == llp) {
/* Currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
* this one must be done.
*/
- dwc_descriptor_complete(dwc, desc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc_descriptor_complete(dwc, desc, true);
+ spin_lock_irqsave(&dwc->lock, flags);
}
dev_err(chan2dev(&dwc->chan),
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_move(dwc->queue.next, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *bad_desc;
struct dw_desc *child;
+ unsigned long flags;
dwc_scan_descriptors(dw, dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+
/*
* The descriptor currently at the head of the active list is
* borked. Since we don't have any way to report errors, we'll
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
/* Pretend the descriptor completed successfully */
- dwc_descriptor_complete(dwc, bad_desc);
+ dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
u32 status_block, u32 status_err, u32 status_xfer)
{
+ unsigned long flags;
+
if (status_block & dwc->mask) {
void (*callback)(void *param);
void *callback_param;
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param;
- if (callback) {
- spin_unlock(&dwc->lock);
+
+ if (callback)
callback(callback_param);
- spin_lock(&dwc->lock);
- }
}
/*
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
"interrupt, stopping DMA transfer\n",
status_xfer ? "xfer" : "error");
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
dev_err(chan2dev(&dwc->chan),
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
channel_readl(dwc, SAR),
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
for (i = 0; i < dwc->cdesc->periods; i++)
dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
- spin_lock(&dwc->lock);
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
dwc_handle_cyclic(dw, dwc, status_block, status_err,
status_xfer);
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
- spin_unlock(&dwc->lock);
}
/*
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
struct dw_desc *desc = txd_to_dw_desc(tx);
struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
cookie = dwc_assign_cookie(dwc, desc);
/*
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &dwc->queue);
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return cookie;
}
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
+ u32 len, dlen, mem;
+
+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc) {
dev_err(chan2dev(chan),
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto err_desc_get;
}
- mem = sg_phys(sg);
- len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
-
desc->lli.sar = mem;
desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
- desc->lli.ctlhi = len >> mem_width;
+ if ((len >> mem_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << mem_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+
+ desc->lli.ctlhi = dlen >> mem_width;
if (!first) {
first = desc;
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_todev_fill_desc;
}
break;
case DMA_FROM_DEVICE:
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
-
- desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
- goto err_desc_get;
- }
+ u32 len, dlen, mem;
mem = sg_phys(sg);
len = sg_dma_len(sg);
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (unlikely(mem & 3 || len & 3))
mem_width = 0;
+slave_sg_fromdev_fill_desc:
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
desc->lli.sar = reg;
desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
- desc->lli.ctlhi = len >> reg_width;
+ if ((len >> reg_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << reg_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+ desc->lli.ctlhi = dlen >> reg_width;
if (!first) {
first = desc;
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_fromdev_fill_desc;
}
break;
default:
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
+ u32 cfglo;
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ if (cmd == DMA_PAUSE) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&dwc->lock);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
- channel_clear_bit(dw, CH_EN, dwc->mask);
+ dwc->paused = true;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_RESUME) {
+ if (!dwc->paused)
+ return 0;
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ spin_lock_irqsave(&dwc->lock, flags);
- /* active_list entries will end up before queued entries */
- list_splice_init(&dwc->queue, &list);
- list_splice_init(&dwc->active_list, &list);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+ dwc->paused = false;
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dwc->paused = false;
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&dwc->queue, &list);
+ list_splice_init(&dwc->active_list, &list);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ dwc_descriptor_complete(dwc, desc, false);
+ } else
+ return -ENXIO;
return 0;
}
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dwc->lock);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
last_complete = dwc->completed;
last_used = chan->cookie;
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ dwc_first_active(dwc)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (dwc->paused)
+ return DMA_PAUSED;
return ret;
}
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- spin_lock_bh(&dwc->lock);
if (!list_empty(&dwc->queue))
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
int i;
u32 cfghi;
u32 cfglo;
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
break;
}
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = ++dwc->descs_allocated;
}
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
LIST_HEAD(list);
dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&dwc->queue));
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
return -ENODEV;
}
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
/* assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_readl(dwc, LLP),
channel_readl(dwc, CTL_HI),
channel_readl(dwc, CTL_LO));
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
}
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_set_bit(dw, CH_EN, dwc->mask);
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
cpu_relax();
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
unsigned int reg_width;
unsigned int periods;
unsigned int i;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(&dwc->chan),
"queue and/or active list are not empty\n");
return ERR_PTR(-EBUSY);
}
was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
if (was_cyclic) {
dev_dbg(chan2dev(&dwc->chan),
"channel already prepared for cyclic DMA\n");
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_cyclic_desc *cdesc = dwc->cdesc;
int i;
+ unsigned long flags;
dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
if (!cdesc)
return;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
for (i = 0; i < cdesc->periods; i++)
dwc_desc_put(dwc, cdesc->desc[i]);
@@ -1487,3 +1576,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821527f8..c3419518d701 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -2,6 +2,7 @@
* Driver for the Synopsys DesignWare AHB DMA Controller
*
* Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -138,6 +139,7 @@ struct dw_dma_chan {
void __iomem *ch_regs;
u8 mask;
u8 priority;
+ bool paused;
spinlock_t lock;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3d4ec38b9b62..f653517ef744 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
if (err)
goto err_dma;
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
@@ -1322,6 +1321,9 @@ err_enable_device:
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
struct middma_device *device = pci_get_drvdata(pdev);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
middma_shutdown(pdev);
pci_dev_put(pdev);
kfree(device);
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
static int dma_runtime_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_suspend(pci_dev, PMSG_SUSPEND);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = SUSPENDED;
+ return 0;
}
static int dma_runtime_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_resume(pci_dev);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = RUNNING;
+ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+ return 0;
}
static int dma_runtime_idle(struct device *dev)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index f4a51d4d0349..5d65f8377971 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -508,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat_ring_ent **ring;
u64 status;
int order;
+ int i = 0;
/* have we already been set up? */
if (ioat->ring)
@@ -548,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
- udelay(5);
- status = ioat_chansts(chan);
+ do {
+ udelay(1);
+ status = ioat_chansts(chan);
+ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
if (is_ioat_active(status) || is_ioat_idle(status)) {
set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
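
The ioat2_alloc_chan_resources() change above replaces a single fixed udelay(5) with a bounded poll of the channel status. A small self-contained sketch of that bounded-poll shape follows; channel_ready() is a made-up stand-in for reading the status register, and only the 20-iteration limit comes from the patch.

#include <stdbool.h>
#include <stdio.h>

/* made-up stand-in for reading the channel status register */
static bool channel_ready(int attempt)
{
	return attempt >= 3;	/* pretend the channel comes up on the 4th poll */
}

int main(void)
{
	bool ready;
	int i = 0;

	/* poll up to 20 times instead of a single fixed delay,
	 * as in the ioat2_alloc_chan_resources() change above */
	do {
		/* the driver does udelay(1) here */
		ready = channel_ready(i);
	} while (i++ < 20 && !ready);

	printf("ready after %d poll(s): %s\n", i, ready ? "yes" : "no");
	return 0;
}
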
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6b01f535b29..e03f811a83dd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a25f5f61e0e0..954e334e01bb 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memcpy_slot_count(len);
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memset_slot_count(len);
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan->device->common.dev,
"%s src_cnt: %d len: dest %x %u flags: %ld\n",
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 8d8fef1480a9..ff5b38f9d45b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -77,10 +77,10 @@ struct pch_dma_regs {
u32 dma_ctl0;
u32 dma_ctl1;
u32 dma_ctl2;
- u32 reserved1;
+ u32 dma_ctl3;
u32 dma_sts0;
u32 dma_sts1;
- u32 reserved2;
+ u32 dma_sts2;
u32 reserved3;
struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
@@ -130,6 +130,7 @@ struct pch_dma {
#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
+#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
@@ -138,7 +139,8 @@ struct pch_dma {
#define dma_writel(pd, name, val) \
writel((val), (pd)->membase + PCH_DMA_##name)
-static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
+static inline
+struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
return container_of(txd, struct pch_dma_desc, txd);
}
@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan)
return chan->dev->device.parent;
}
-static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->active_list,
struct pch_dma_desc, desc_node);
}
-static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->queue,
struct pch_dma_desc, desc_node);
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
- if (pd_chan->dir == DMA_TO_DEVICE)
- val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS);
- else
- val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS));
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
+ val = dma_readl(pd, CTL3);
- dma_writel(pd, CTL0, val);
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL3, val);
+ }
dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
chan->chan_id, val);
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * chan->chan_id));
- val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
+
+ val = dma_readl(pd, CTL3);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
- dma_writel(pd, CTL0, val);
+ dma_writel(pd, CTL3, val);
+
+ }
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
{
- struct pch_dma *pd = to_pd(pd_chan->chan.device);
- u32 val;
-
if (!pdc_is_idle(pd_chan)) {
dev_err(chan2dev(&pd_chan->chan),
"BUG: Attempt to start non-idle channel\n");
@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
channel_writel(pd_chan, NEXT, desc->txd.phys);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
}
-
- val = dma_readl(pd, CTL2);
- val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
- dma_writel(pd, CTL2, val);
}
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
struct pch_dma_desc *desc, *_d;
struct pch_dma_desc *ret = NULL;
- int i;
+ int i = 0;
spin_lock(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
spin_unlock_bh(&pd_chan->lock);
pdc_enable_irq(chan, 1);
- pdc_set_dir(chan);
return pd_chan->descs_allocated;
}
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
else
return NULL;
+ pd_chan->dir = direction;
+ pdc_set_dir(chan);
+
for_each_sg(sgl, sg, sg_len, i) {
desc = pdc_desc_get(pd_chan);
@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
+ pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
dma_writel(pd, CTL0, pd->regs.dma_ctl0);
dma_writel(pd, CTL1, pd->regs.dma_ctl1);
dma_writel(pd, CTL2, pd->regs.dma_ctl2);
+ dma_writel(pd, CTL3, pd->regs.dma_ctl3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
pd_chan->membase = &regs->desc[i];
- pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
spin_lock_init(&pd_chan->lock);
INIT_LIST_HEAD(&pd_chan->active_list);
@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
+#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
+#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
+#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
+#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
+#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
-static const struct pci_device_id pch_dma_id_table[] = {
+DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
{ 0, },
};
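
The pch_dma changes above route channels 0-7 through CTL0 and the new channels 8-11 through CTL3, using the same per-channel bit layout in both registers. The sketch below reproduces only that channel-to-register mapping; the BITS_PER_CH and DIR_SHIFT values are assumptions made for the example, not taken from the driver header.

#include <stdio.h>

#define BITS_PER_CH	4	/* assumed per-channel field width */
#define DIR_SHIFT	2	/* assumed direction bit offset */

/* Channels 0-7 are configured through CTL0, channels 8-11 through the
 * newly described CTL3, with the same per-channel layout. */
static void describe(int chan_id)
{
	const char *reg = chan_id < 8 ? "CTL0" : "CTL3";
	int ch = chan_id < 8 ? chan_id : chan_id - 8;

	printf("chan %2d -> %s, dir bit %d\n",
	       chan_id, reg, BITS_PER_CH * ch + DIR_SHIFT);
}

int main(void)
{
	int c;

	for (c = 0; c < 12; c++)
		describe(c);
	return 0;
}
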
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 3b0247e74cc4..fc457a7e8832 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
dma_dest, dma_src, src_cnt));
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(ppc440spe_chan->device->common.dev,
"ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
dst, src, src_cnt));
BUG_ON(!len);
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
BUG_ON(!src_cnt);
if (src_cnt == 1 && dst[1] == src[0]) {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 94ee15dd3aed..8f222d4db7de 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
if (chan->runtime_addr)
return chan->runtime_addr;
@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void)
{
return platform_driver_probe(&d40_driver, d40_probe);
}
-arch_initcall(stedma40_init);
+subsys_initcall(stedma40_init);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b57ec09af891..592397629ddc 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -86,6 +86,30 @@ config GPIO_IT8761E
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
+config GPIO_EXYNOS4
+ bool "Samsung Exynos4 GPIO library support"
+ default y if CPU_EXYNOS4210
+ help
+ Say yes here to support the Samsung Exynos4 series SoC GPIO library.
+
+config GPIO_PLAT_SAMSUNG
+ bool "Samsung SoCs GPIO library support"
+ default y if SAMSUNG_GPIOLIB_4BIT
+ help
+ Say yes here to support the Samsung SoC GPIO library.
+
+config GPIO_S5PC100
+ bool "Samsung S5PC100 GPIO library support"
+ default y if CPU_S5PC100
+ help
+ Say yes here to support the Samsung S5PC100 SoC GPIO library.
+
+config GPIO_S5PV210
+ bool "Samsung S5PV210/S5PC110 GPIO library support"
+ default y if CPU_S5PV210
+ help
+ Say yes here to support the Samsung S5PV210/S5PC110 SoC GPIO library.
+
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
@@ -303,7 +327,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && !CS5535_GPIO
+ depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -334,13 +358,19 @@ config GPIO_LANGWELL
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
- tristate "PCH GPIO of Intel Topcliff"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
depends on PCI && X86
help
This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
+ This driver can also be used for the OKI SEMICONDUCTOR ML7223 IOH
+ (Input/Output Hub). The ML7223 IOH is for MP (Media Phone) use.
+ The ML7223 is a companion chip for the Intel Atom E6xx series and
+ is fully compatible with the Intel EG20T PCH.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on PCI
@@ -424,4 +454,11 @@ config AB8500_GPIO
depends on AB8500_CORE && BROKEN
help
Select this to enable the AB8500 IC GPIO driver
+
+config GPIO_TPS65910
+ bool "TPS65910 GPIO"
+ depends on MFD_TPS65910
+ help
+ Select this option to enable the GPIO driver for the TPS65910
+ chip family.
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d92ce3a62ae5..b605f8ec6fbe 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
+obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
+obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
+obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
+obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX730X) += max730x.o
obj-$(CONFIG_GPIO_MAX7300) += max7300.o
@@ -16,6 +20,7 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MC33880) += mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_74X164) += 74x164.o
+obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
@@ -34,9 +39,12 @@ obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_MACH_U300) += gpio-u300.o
+obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
obj-$(CONFIG_GPIO_SX150X) += sx150x.o
obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
+obj-$(CONFIG_GPIO_TPS65910) += tps65910-gpio.o
diff --git a/arch/arm/mach-exynos4/gpiolib.c b/drivers/gpio/gpio-exynos4.c
index d54ca6adb660..d54ca6adb660 100644
--- a/arch/arm/mach-exynos4/gpiolib.c
+++ b/drivers/gpio/gpio-exynos4.c
diff --git a/arch/arm/plat-nomadik/gpio.c b/drivers/gpio/gpio-nomadik.c
index 307b8131aa8c..4961ef9bc153 100644
--- a/arch/arm/plat-nomadik/gpio.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -57,6 +57,7 @@ struct nmk_gpio_chip {
u32 fwimsc;
u32 slpm;
u32 enabled;
+ u32 pull_up;
};
static struct nmk_gpio_chip *
@@ -103,16 +104,22 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
u32 pdis;
pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS);
- if (pull == NMK_GPIO_PULL_NONE)
+ if (pull == NMK_GPIO_PULL_NONE) {
pdis |= bit;
- else
+ nmk_chip->pull_up &= ~bit;
+ } else {
pdis &= ~bit;
+ }
+
writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS);
- if (pull == NMK_GPIO_PULL_UP)
+ if (pull == NMK_GPIO_PULL_UP) {
+ nmk_chip->pull_up |= bit;
writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
- else if (pull == NMK_GPIO_PULL_DOWN)
+ } else if (pull == NMK_GPIO_PULL_DOWN) {
+ nmk_chip->pull_up &= ~bit;
writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+ }
}
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
@@ -811,20 +818,43 @@ static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
bool pull;
u32 bit = 1 << i;
- if (!label)
- continue;
-
is_out = readl(nmk_chip->addr + NMK_GPIO_DIR) & bit;
pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
mode = nmk_gpio_get_mode(gpio);
seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s",
- gpio, label,
+ gpio, label ?: "(none)",
is_out ? "out" : "in ",
chip->get
? (chip->get(chip, i) ? "hi" : "lo")
: "? ",
(mode < 0) ? "unknown" : modes[mode],
pull ? "pull" : "none");
+
+ if (label && !is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ /* This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ */
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+
+ if (nmk_chip->edge_rising & bitmask)
+ trigger = "edge-rising";
+ else if (nmk_chip->edge_falling & bitmask)
+ trigger = "edge-falling";
+ else
+ trigger = "edge-undefined";
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ irqd_is_wakeup_set(&desc->irq_data)
+ ? " wakeup" : "");
+ }
+ }
+
seq_printf(s, "\n");
}
}
@@ -898,6 +928,25 @@ void nmk_gpio_wakeups_resume(void)
}
}
+/*
+ * Read the pull up/pull down status.
+ * A bit set in 'pull_up' means that pull-up is selected if the pull is
+ * enabled in the PDIS register.
+ * Note: only pull up/down settings made via this driver can be detected,
+ * due to HW limitations.
+ */
+void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
+{
+ if (gpio_bank < NUM_BANKS) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank];
+
+ if (!chip)
+ return;
+
+ *pull_up = chip->pull_up;
+ }
+}
+
static int __devinit nmk_gpio_probe(struct platform_device *dev)
{
struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
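
The gpio-nomadik hunks above add per-bank bookkeeping of which pins were last configured as pull-up, so that nmk_gpio_read_pull() can report it. The sketch below mirrors only that bitmask bookkeeping in plain C; the enum and function names are invented for the example.

#include <stdint.h>
#include <stdio.h>

enum pull { PULL_NONE, PULL_UP, PULL_DOWN };

/* Remember which pins of a bank were last configured as pull-up,
 * as the new nmk_chip->pull_up field does above. */
static void note_pull(uint32_t *pull_up, uint32_t bit, enum pull pull)
{
	if (pull == PULL_UP)
		*pull_up |= bit;
	else
		*pull_up &= ~bit;
}

int main(void)
{
	uint32_t pull_up = 0;

	note_pull(&pull_up, 1u << 3, PULL_UP);
	note_pull(&pull_up, 1u << 5, PULL_DOWN);
	note_pull(&pull_up, 1u << 7, PULL_UP);
	printf("pull_up mask: 0x%08x\n", pull_up);
	return 0;
}
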
diff --git a/arch/arm/plat-omap/gpio.c b/drivers/gpio/gpio-omap.c
index efb869390199..6c51191da567 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/arm/plat-omap/gpio.c
- *
* Support functions for OMAP GPIO
*
* Copyright (C) 2003-2005 Nokia Corporation
@@ -30,109 +28,6 @@
#include <mach/gpio.h>
#include <asm/mach/irq.h>
-/*
- * OMAP1510 GPIO registers
- */
-#define OMAP1510_GPIO_DATA_INPUT 0x00
-#define OMAP1510_GPIO_DATA_OUTPUT 0x04
-#define OMAP1510_GPIO_DIR_CONTROL 0x08
-#define OMAP1510_GPIO_INT_CONTROL 0x0c
-#define OMAP1510_GPIO_INT_MASK 0x10
-#define OMAP1510_GPIO_INT_STATUS 0x14
-#define OMAP1510_GPIO_PIN_CONTROL 0x18
-
-#define OMAP1510_IH_GPIO_BASE 64
-
-/*
- * OMAP1610 specific GPIO registers
- */
-#define OMAP1610_GPIO_REVISION 0x0000
-#define OMAP1610_GPIO_SYSCONFIG 0x0010
-#define OMAP1610_GPIO_SYSSTATUS 0x0014
-#define OMAP1610_GPIO_IRQSTATUS1 0x0018
-#define OMAP1610_GPIO_IRQENABLE1 0x001c
-#define OMAP1610_GPIO_WAKEUPENABLE 0x0028
-#define OMAP1610_GPIO_DATAIN 0x002c
-#define OMAP1610_GPIO_DATAOUT 0x0030
-#define OMAP1610_GPIO_DIRECTION 0x0034
-#define OMAP1610_GPIO_EDGE_CTRL1 0x0038
-#define OMAP1610_GPIO_EDGE_CTRL2 0x003c
-#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c
-#define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8
-#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0
-#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc
-#define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8
-#define OMAP1610_GPIO_SET_DATAOUT 0x00f0
-
-/*
- * OMAP7XX specific GPIO registers
- */
-#define OMAP7XX_GPIO_DATA_INPUT 0x00
-#define OMAP7XX_GPIO_DATA_OUTPUT 0x04
-#define OMAP7XX_GPIO_DIR_CONTROL 0x08
-#define OMAP7XX_GPIO_INT_CONTROL 0x0c
-#define OMAP7XX_GPIO_INT_MASK 0x10
-#define OMAP7XX_GPIO_INT_STATUS 0x14
-
-/*
- * omap2+ specific GPIO registers
- */
-#define OMAP24XX_GPIO_REVISION 0x0000
-#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
-#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
-#define OMAP24XX_GPIO_IRQENABLE2 0x002c
-#define OMAP24XX_GPIO_IRQENABLE1 0x001c
-#define OMAP24XX_GPIO_WAKE_EN 0x0020
-#define OMAP24XX_GPIO_CTRL 0x0030
-#define OMAP24XX_GPIO_OE 0x0034
-#define OMAP24XX_GPIO_DATAIN 0x0038
-#define OMAP24XX_GPIO_DATAOUT 0x003c
-#define OMAP24XX_GPIO_LEVELDETECT0 0x0040
-#define OMAP24XX_GPIO_LEVELDETECT1 0x0044
-#define OMAP24XX_GPIO_RISINGDETECT 0x0048
-#define OMAP24XX_GPIO_FALLINGDETECT 0x004c
-#define OMAP24XX_GPIO_DEBOUNCE_EN 0x0050
-#define OMAP24XX_GPIO_DEBOUNCE_VAL 0x0054
-#define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060
-#define OMAP24XX_GPIO_SETIRQENABLE1 0x0064
-#define OMAP24XX_GPIO_CLEARWKUENA 0x0080
-#define OMAP24XX_GPIO_SETWKUENA 0x0084
-#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090
-#define OMAP24XX_GPIO_SETDATAOUT 0x0094
-
-#define OMAP4_GPIO_REVISION 0x0000
-#define OMAP4_GPIO_EOI 0x0020
-#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
-#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
-#define OMAP4_GPIO_IRQSTATUS0 0x002c
-#define OMAP4_GPIO_IRQSTATUS1 0x0030
-#define OMAP4_GPIO_IRQSTATUSSET0 0x0034
-#define OMAP4_GPIO_IRQSTATUSSET1 0x0038
-#define OMAP4_GPIO_IRQSTATUSCLR0 0x003c
-#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040
-#define OMAP4_GPIO_IRQWAKEN0 0x0044
-#define OMAP4_GPIO_IRQWAKEN1 0x0048
-#define OMAP4_GPIO_IRQENABLE1 0x011c
-#define OMAP4_GPIO_WAKE_EN 0x0120
-#define OMAP4_GPIO_IRQSTATUS2 0x0128
-#define OMAP4_GPIO_IRQENABLE2 0x012c
-#define OMAP4_GPIO_CTRL 0x0130
-#define OMAP4_GPIO_OE 0x0134
-#define OMAP4_GPIO_DATAIN 0x0138
-#define OMAP4_GPIO_DATAOUT 0x013c
-#define OMAP4_GPIO_LEVELDETECT0 0x0140
-#define OMAP4_GPIO_LEVELDETECT1 0x0144
-#define OMAP4_GPIO_RISINGDETECT 0x0148
-#define OMAP4_GPIO_FALLINGDETECT 0x014c
-#define OMAP4_GPIO_DEBOUNCENABLE 0x0150
-#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154
-#define OMAP4_GPIO_CLEARIRQENABLE1 0x0160
-#define OMAP4_GPIO_SETIRQENABLE1 0x0164
-#define OMAP4_GPIO_CLEARWKUENA 0x0180
-#define OMAP4_GPIO_SETWKUENA 0x0184
-#define OMAP4_GPIO_CLEARDATAOUT 0x0190
-#define OMAP4_GPIO_SETDATAOUT 0x0194
-
struct gpio_bank {
unsigned long pbase;
void __iomem *base;
diff --git a/arch/arm/plat-samsung/gpiolib.c b/drivers/gpio/gpio-plat-samsung.c
index ea37c0461788..ea37c0461788 100644
--- a/arch/arm/plat-samsung/gpiolib.c
+++ b/drivers/gpio/gpio-plat-samsung.c
diff --git a/arch/arm/mach-s5pc100/gpiolib.c b/drivers/gpio/gpio-s5pc100.c
index 2842394b28b5..2842394b28b5 100644
--- a/arch/arm/mach-s5pc100/gpiolib.c
+++ b/drivers/gpio/gpio-s5pc100.c
diff --git a/arch/arm/mach-s5pv210/gpiolib.c b/drivers/gpio/gpio-s5pv210.c
index 1ba20a703e05..1ba20a703e05 100644
--- a/arch/arm/mach-s5pv210/gpiolib.c
+++ b/drivers/gpio/gpio-s5pv210.c
diff --git a/arch/arm/mach-u300/gpio.c b/drivers/gpio/gpio-u300.c
index d92790140fe5..d92790140fe5 100644
--- a/arch/arm/mach-u300/gpio.c
+++ b/drivers/gpio/gpio-u300.c
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 137a8ca67822..a971e3d043ba 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1296,7 +1296,7 @@ EXPORT_SYMBOL_GPL(gpio_request_one);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-int gpio_request_array(struct gpio *array, size_t num)
+int gpio_request_array(const struct gpio *array, size_t num)
{
int i, err;
@@ -1319,7 +1319,7 @@ EXPORT_SYMBOL_GPL(gpio_request_array);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-void gpio_free_array(struct gpio *array, size_t num)
+void gpio_free_array(const struct gpio *array, size_t num)
{
while (num--)
gpio_free((array++)->gpio);
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 1b06f67e1f69..bd6571e0097a 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
/*
* Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -63,6 +64,7 @@ struct lnw_gpio {
void *reg_base;
spinlock_t lock;
unsigned irq_base;
+ struct pci_dev *pdev;
};
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
@@ -104,11 +106,18 @@ static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u32 value;
unsigned long flags;
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value &= ~BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -120,11 +129,19 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned long flags;
lnw_gpio_set(chip, offset, value);
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value |= BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -145,6 +162,10 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
value = readl(grer) | BIT(gpio % 32);
@@ -159,6 +180,9 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
writel(value, gfer);
spin_unlock_irqrestore(&lnw->lock, flags);
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -211,6 +235,39 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
chip->irq_eoi(data);
}
+#ifdef CONFIG_PM
+static int lnw_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_idle(struct device *dev)
+{
+ int err = pm_schedule_suspend(dev, 500);
+
+ if (!err)
+ return 0;
+
+ return -EBUSY;
+}
+
+#else
+#define lnw_gpio_runtime_suspend NULL
+#define lnw_gpio_runtime_resume NULL
+#define lnw_gpio_runtime_idle NULL
+#endif
+
+static const struct dev_pm_ops lnw_gpio_pm_ops = {
+ .runtime_suspend = lnw_gpio_runtime_suspend,
+ .runtime_resume = lnw_gpio_runtime_resume,
+ .runtime_idle = lnw_gpio_runtime_idle,
+};
+
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -270,6 +327,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.base = gpio_base;
lnw->chip.ngpio = id->driver_data;
lnw->chip.can_sleep = 0;
+ lnw->pdev = pdev;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
if (retval) {
@@ -285,6 +343,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
}
spin_lock_init(&lnw->lock);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
goto done;
err5:
kfree(lnw);
@@ -302,6 +364,9 @@ static struct pci_driver lnw_gpio_driver = {
.name = "langwell_gpio",
.id_table = lnw_gpio_ids,
.probe = lnw_gpio_probe,
+ .driver = {
+ .pm = &lnw_gpio_pm_ops,
+ },
};
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 78a843947d82..0451d7ac94ac 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -24,33 +24,46 @@
#include <linux/of_gpio.h>
#endif
-#define PCA953X_INPUT 0
-#define PCA953X_OUTPUT 1
-#define PCA953X_INVERT 2
-#define PCA953X_DIRECTION 3
-
-#define PCA953X_GPIOS 0x00FF
-#define PCA953X_INT 0x0100
+#define PCA953X_INPUT 0
+#define PCA953X_OUTPUT 1
+#define PCA953X_INVERT 2
+#define PCA953X_DIRECTION 3
+
+#define PCA957X_IN 0
+#define PCA957X_INVRT 1
+#define PCA957X_BKEN 2
+#define PCA957X_PUPD 3
+#define PCA957X_CFG 4
+#define PCA957X_OUT 5
+#define PCA957X_MSK 6
+#define PCA957X_INTS 7
+
+#define PCA_GPIO_MASK 0x00FF
+#define PCA_INT 0x0100
+#define PCA953X_TYPE 0x1000
+#define PCA957X_TYPE 0x2000
static const struct i2c_device_id pca953x_id[] = {
- { "pca9534", 8 | PCA953X_INT, },
- { "pca9535", 16 | PCA953X_INT, },
- { "pca9536", 4, },
- { "pca9537", 4 | PCA953X_INT, },
- { "pca9538", 8 | PCA953X_INT, },
- { "pca9539", 16 | PCA953X_INT, },
- { "pca9554", 8 | PCA953X_INT, },
- { "pca9555", 16 | PCA953X_INT, },
- { "pca9556", 8, },
- { "pca9557", 8, },
-
- { "max7310", 8, },
- { "max7312", 16 | PCA953X_INT, },
- { "max7313", 16 | PCA953X_INT, },
- { "max7315", 8 | PCA953X_INT, },
- { "pca6107", 8 | PCA953X_INT, },
- { "tca6408", 8 | PCA953X_INT, },
- { "tca6416", 16 | PCA953X_INT, },
+ { "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9536", 4 | PCA953X_TYPE, },
+ { "pca9537", 4 | PCA953X_TYPE | PCA_INT, },
+ { "pca9538", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9554", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9556", 8 | PCA953X_TYPE, },
+ { "pca9557", 8 | PCA953X_TYPE, },
+ { "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
+ { "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+
+ { "max7310", 8 | PCA953X_TYPE, },
+ { "max7312", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7313", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7315", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
/* NYET: { "tca6424", 24, }, */
{ }
};
@@ -75,16 +88,32 @@ struct pca953x_chip {
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
const char *const *names;
+ int chip_type;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
{
- int ret;
+ int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_write_byte_data(chip->client, reg, val);
- else
- ret = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+ else {
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ ret = i2c_smbus_write_word_data(chip->client,
+ reg << 1, val);
+ break;
+ case PCA957X_TYPE:
+ ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
+ val & 0xff);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << 1) + 1,
+ (val & 0xff00) >> 8);
+ break;
+ }
+ }
if (ret < 0) {
dev_err(&chip->client->dev, "failed writing register\n");
@@ -116,13 +145,22 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction | (1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -138,7 +176,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -149,7 +187,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -157,7 +203,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
/* then direction */
reg_val = chip->reg_direction & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -172,12 +226,20 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &reg_val);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
@@ -194,7 +256,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -204,7 +266,15 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -322,9 +392,17 @@ static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
uint16_t old_stat;
uint16_t pending;
uint16_t trigger;
- int ret;
-
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &cur_stat);
+ int ret, offset = 0;
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &cur_stat);
if (ret)
return 0;
@@ -372,14 +450,21 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- int ret;
+ int ret, offset = 0;
if (pdata->irq_base != -1
- && (id->driver_data & PCA953X_INT)) {
+ && (id->driver_data & PCA_INT)) {
int lvl;
- ret = pca953x_read_reg(chip, PCA953X_INPUT,
- &chip->irq_stat);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
if (ret)
goto out_failed;
@@ -439,7 +524,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
+ if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
@@ -499,12 +584,65 @@ pca953x_get_alt_pdata(struct i2c_client *client)
}
#endif
+static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+
+ ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ if (ret)
+ goto out;
+
+ ret = pca953x_read_reg(chip, PCA953X_DIRECTION,
+ &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+ uint16_t val = 0;
+
+ /* Put every port into a proper state; that can save power */
+ pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
+ pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
+ pca953x_write_reg(chip, PCA957X_OUT, 0x0);
+
+ ret = pca953x_read_reg(chip, PCA957X_IN, &val);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ pca953x_write_reg(chip, PCA957X_INVRT, invert);
+
+ /* Enable registers 6 and 7 to control pull up and pull down */
+ pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
+
+ return 0;
+out:
+ return ret;
+}
+
static int __devinit pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
- int ret;
+ int ret = 0;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
@@ -531,25 +669,20 @@ static int __devinit pca953x_probe(struct i2c_client *client,
chip->gpio_start = pdata->gpio_base;
chip->names = pdata->names;
+ chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
mutex_init(&chip->i2c_lock);
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, id->driver_data & PCA953X_GPIOS);
+ pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
- ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
- if (ret)
- goto out_failed;
-
- ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction);
- if (ret)
- goto out_failed;
-
- /* set platform specific polarity inversion */
- ret = pca953x_write_reg(chip, PCA953X_INVERT, pdata->invert);
- if (ret)
+ if (chip->chip_type == PCA953X_TYPE)
+ device_pca953x_init(chip, pdata->invert);
+ else if (chip->chip_type == PCA957X_TYPE)
+ device_pca957x_init(chip, pdata->invert);
+ else
goto out_failed;
ret = pca953x_irq_setup(chip, id);
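
The pca953x changes above make every register accessor pick its offset from chip_type, because the PCA957x register map places the same logical registers at different offsets. The sketch below reproduces just that offset selection, using the register indices defined in the hunks above; the helper names are invented for the example.

#include <stdio.h>

/* register indices from the hunks above */
enum { PCA953X_INPUT = 0, PCA953X_OUTPUT = 1, PCA953X_DIRECTION = 3 };
enum { PCA957X_IN = 0, PCA957X_CFG = 4, PCA957X_OUT = 5 };

enum chip_type { TYPE_PCA953X, TYPE_PCA957X };

/* The same logical register lives at a different offset on each map,
 * which is what the repeated chip_type switches above select. */
static int input_reg(enum chip_type t)
{
	return t == TYPE_PCA953X ? PCA953X_INPUT : PCA957X_IN;
}

static int output_reg(enum chip_type t)
{
	return t == TYPE_PCA953X ? PCA953X_OUTPUT : PCA957X_OUT;
}

static int direction_reg(enum chip_type t)
{
	return t == TYPE_PCA953X ? PCA953X_DIRECTION : PCA957X_CFG;
}

int main(void)
{
	printf("pca953x: in=%d out=%d dir=%d\n", input_reg(TYPE_PCA953X),
	       output_reg(TYPE_PCA953X), direction_reg(TYPE_PCA953X));
	printf("pca957x: in=%d out=%d dir=%d\n", input_reg(TYPE_PCA957X),
	       output_reg(TYPE_PCA957X), direction_reg(TYPE_PCA957X));
	return 0;
}
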
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index f970a5f3585e..36919e77c495 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -283,8 +283,10 @@ static int pch_gpio_resume(struct pci_dev *pdev)
#define pch_gpio_resume NULL
#endif
+#define PCI_VENDOR_ID_ROHM 0x10DB
static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
new file mode 100644
index 000000000000..8d1ddfdd63eb
--- /dev/null
+++ b/drivers/gpio/tps65910-gpio.c
@@ -0,0 +1,100 @@
+/*
+ * tps65910-gpio.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/tps65910.h>
+
+static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ uint8_t val;
+
+ tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+
+ if (val & GPIO_STS_MASK)
+ return 1;
+
+ return 0;
+}
+
+static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+ if (value)
+ tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_SET_MASK);
+ else
+ tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_SET_MASK);
+}
+
+static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+ /* Set the initial value */
+ tps65910_gpio_set(gc, offset, value);
+
+ return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_CFG_MASK);
+}
+
+static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+ return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_CFG_MASK);
+}
+
+void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+{
+ int ret;
+
+ if (!gpio_base)
+ return;
+
+ tps65910->gpio.owner = THIS_MODULE;
+ tps65910->gpio.label = tps65910->i2c_client->name;
+ tps65910->gpio.dev = tps65910->dev;
+ tps65910->gpio.base = gpio_base;
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ tps65910->gpio.ngpio = 6;
+ break;
+ case TPS65911:
+ tps65910->gpio.ngpio = 9;
+ break;
+ default:
+ return;
+ }
+ tps65910->gpio.can_sleep = 1;
+
+ tps65910->gpio.direction_input = tps65910_gpio_input;
+ tps65910->gpio.direction_output = tps65910_gpio_output;
+ tps65910->gpio.set = tps65910_gpio_set;
+ tps65910->gpio.get = tps65910_gpio_get;
+
+ ret = gpiochip_add(&tps65910->gpio);
+
+ if (ret)
+ dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+}
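Once tps65910_gpio_init() has registered the chip, consumers reach these pins through the ordinary gpiolib calls. A hypothetical board-code sketch, assuming a gpio_base of 200 was passed in (the base value is an assumption, not part of this patch):

	#include <linux/gpio.h>

	#define TPS65910_GPIO_BASE 200	/* assumed value handed to tps65910_gpio_init() */

	static int example_use_pmic_gpio(void)
	{
		int ret;

		/* PMIC GPIO0 is TPS65910_GPIO_BASE + 0 in the global GPIO numbering */
		ret = gpio_request(TPS65910_GPIO_BASE, "pmic-gpio0");
		if (ret)
			return ret;

		/* drive the pin high; ends up in tps65910_gpio_output()/_set() above */
		ret = gpio_direction_output(TPS65910_GPIO_BASE, 1);
		if (ret)
			gpio_free(TPS65910_GPIO_BASE);
		return ret;
	}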
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 9577c432e77f..de3d2465fe24 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -350,6 +350,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
+ sysfs_attr_init(&pdata->name_attr.attr);
pdata->name_attr.attr.name = "name";
pdata->name_attr.attr.mode = S_IRUGO;
pdata->name_attr.show = show_name;
@@ -372,6 +373,7 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
for (i = 0; i < MAX_ATTRS; i++) {
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
attr_no);
+ sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
@@ -422,7 +424,7 @@ static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
}
}
-static int chk_ucode_version(struct platform_device *pdev)
+static int __devinit chk_ucode_version(struct platform_device *pdev)
{
struct cpuinfo_x86 *c = &cpu_data(pdev->id);
int err;
@@ -509,8 +511,8 @@ static int create_core_data(struct platform_data *pdata,
/*
* Provide a single set of attributes for all HT siblings of a core
* to avoid duplicate sensors (the processor ID and core ID of all
- * HT siblings of a core is the same).
- * Skip if a HT sibling of this core is already online.
+ * HT siblings of a core are the same).
+ * Skip if a HT sibling of this core is already registered.
* This is not an error.
*/
if (pdata->core_data[attr_no] != NULL)
@@ -770,10 +772,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
coretemp_remove_core(pdata, &pdev->dev, indx);
/*
- * If a core is taken offline, but a HT sibling of the same core is
- * still online, register the alternate sibling. This ensures that
- * exactly one set of attributes is provided as long as at least one
- * HT sibling of a core is online.
+ * If a HT sibling of a core is taken offline, but another HT sibling
+ * of the same core is still online, register the alternate sibling.
+ * This ensures that exactly one set of attributes is provided as long
+ * as at least one HT sibling of a core is online.
*/
for_each_sibling(i, cpu) {
if (i != cpu) {
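The sysfs_attr_init() calls added above matter because these attributes live in dynamically allocated memory; with lockdep enabled each such attribute needs its own lock class key, otherwise sysfs registration warns. A sketch of the general pattern the hunks follow (names here are illustrative only):

	#include <linux/device.h>
	#include <linux/stat.h>
	#include <linux/sysfs.h>

	/* a heap-allocated device attribute plus the buffer backing its name */
	struct example_data {
		struct device_attribute attr;
		char name[16];
	};

	static void example_setup_attr(struct example_data *d,
				       ssize_t (*show)(struct device *,
						       struct device_attribute *, char *))
	{
		sysfs_attr_init(&d->attr.attr);	/* give the attribute its own lockdep key */
		d->attr.attr.name = d->name;
		d->attr.attr.mode = S_IRUGO;
		d->attr.show = show;
		/* device_create_file(dev, &d->attr) would then register it */
	}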
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 98799bab69ce..354770ed3186 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -707,6 +707,7 @@ do { \
struct sensor_device_attribute *a \
= &data->_type##s[data->num_##_type##s].attribute; \
BUG_ON(data->num_attributes >= data->max_attributes); \
+ sysfs_attr_init(&a->dev_attr.attr); \
a->dev_attr.attr.name = _name; \
a->dev_attr.attr.mode = _mode; \
a->dev_attr.show = _show; \
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 2d8b4044be36..b2b0c45f32a9 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -20,6 +20,7 @@
*/
#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -49,11 +50,12 @@
#define UNSET (-1U)
-#define DM1105_BOARD_NOAUTO UNSET
-#define DM1105_BOARD_UNKNOWN 0
-#define DM1105_BOARD_DVBWORLD_2002 1
-#define DM1105_BOARD_DVBWORLD_2004 2
-#define DM1105_BOARD_AXESS_DM05 3
+#define DM1105_BOARD_NOAUTO UNSET
+#define DM1105_BOARD_UNKNOWN 0
+#define DM1105_BOARD_DVBWORLD_2002 1
+#define DM1105_BOARD_DVBWORLD_2004 2
+#define DM1105_BOARD_AXESS_DM05 3
+#define DM1105_BOARD_UNBRANDED_I2C_ON_GPIO 4
/* ----------------------------------------------- */
/*
@@ -157,22 +159,38 @@
#define DM1105_MAX 0x04
#define DRIVER_NAME "dm1105"
+#define DM1105_I2C_GPIO_NAME "dm1105-gpio"
#define DM1105_DMA_PACKETS 47
#define DM1105_DMA_PACKET_LENGTH (128*4)
#define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS)
+/* GPIO bit definitions */
+#define GPIO08 (1 << 8)
+#define GPIO13 (1 << 13)
+#define GPIO14 (1 << 14)
+#define GPIO15 (1 << 15)
+#define GPIO16 (1 << 16)
+#define GPIO17 (1 << 17)
+#define GPIO_ALL 0x03ffff
+
/* GPIO's for LNB power control */
-#define DM1105_LNB_MASK 0x00000000
-#define DM1105_LNB_OFF 0x00020000
-#define DM1105_LNB_13V 0x00010100
-#define DM1105_LNB_18V 0x00000100
+#define DM1105_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
+#define DM1105_LNB_OFF GPIO17
+#define DM1105_LNB_13V (GPIO16 | GPIO08)
+#define DM1105_LNB_18V GPIO08
/* GPIO's for LNB power control for Axess DM05 */
-#define DM05_LNB_MASK 0x00000000
-#define DM05_LNB_OFF 0x00020000/* actually 13v */
-#define DM05_LNB_13V 0x00020000
-#define DM05_LNB_18V 0x00030000
+#define DM05_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
+#define DM05_LNB_OFF GPIO17/* actually 13v */
+#define DM05_LNB_13V GPIO17
+#define DM05_LNB_18V (GPIO17 | GPIO16)
+
+/* GPIO's for LNB power control for unbranded with I2C on GPIO */
+#define UNBR_LNB_MASK (GPIO17 | GPIO16)
+#define UNBR_LNB_OFF 0
+#define UNBR_LNB_13V GPIO17
+#define UNBR_LNB_18V (GPIO17 | GPIO16)
static unsigned int card[] = {[0 ... 3] = UNSET };
module_param_array(card, int, NULL, 0444);
@@ -187,7 +205,11 @@ static unsigned int dm1105_devcount;
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dm1105_board {
- char *name;
+ char *name;
+ struct {
+ u32 mask, off, v13, v18;
+ } lnb;
+ u32 gpio_scl, gpio_sda;
};
struct dm1105_subid {
@@ -199,15 +221,50 @@ struct dm1105_subid {
static const struct dm1105_board dm1105_boards[] = {
[DM1105_BOARD_UNKNOWN] = {
.name = "UNKNOWN/GENERIC",
+ .lnb = {
+ .mask = DM1105_LNB_MASK,
+ .off = DM1105_LNB_OFF,
+ .v13 = DM1105_LNB_13V,
+ .v18 = DM1105_LNB_18V,
+ },
},
[DM1105_BOARD_DVBWORLD_2002] = {
.name = "DVBWorld PCI 2002",
+ .lnb = {
+ .mask = DM1105_LNB_MASK,
+ .off = DM1105_LNB_OFF,
+ .v13 = DM1105_LNB_13V,
+ .v18 = DM1105_LNB_18V,
+ },
},
[DM1105_BOARD_DVBWORLD_2004] = {
.name = "DVBWorld PCI 2004",
+ .lnb = {
+ .mask = DM1105_LNB_MASK,
+ .off = DM1105_LNB_OFF,
+ .v13 = DM1105_LNB_13V,
+ .v18 = DM1105_LNB_18V,
+ },
},
[DM1105_BOARD_AXESS_DM05] = {
.name = "Axess/EasyTv DM05",
+ .lnb = {
+ .mask = DM05_LNB_MASK,
+ .off = DM05_LNB_OFF,
+ .v13 = DM05_LNB_13V,
+ .v18 = DM05_LNB_18V,
+ },
+ },
+ [DM1105_BOARD_UNBRANDED_I2C_ON_GPIO] = {
+ .name = "Unbranded DM1105 with i2c on GPIOs",
+ .lnb = {
+ .mask = UNBR_LNB_MASK,
+ .off = UNBR_LNB_OFF,
+ .v13 = UNBR_LNB_13V,
+ .v18 = UNBR_LNB_18V,
+ },
+ .gpio_scl = GPIO14,
+ .gpio_sda = GPIO13,
},
};
@@ -293,6 +350,8 @@ struct dm1105_dev {
/* i2c */
struct i2c_adapter i2c_adap;
+ struct i2c_adapter i2c_bb_adap;
+ struct i2c_algo_bit_data i2c_bit;
/* irq */
struct work_struct work;
@@ -328,6 +387,103 @@ struct dm1105_dev {
#define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit))
#define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0)
+/* The chip has 18 GPIOs. In HOST mode the GPIOs are used as 15 address lines,
+ * so only the 3 GPIOs from GPIO15 to GPIO17 are usable.
+ * HOST mode is not checked here since it is not implemented yet.
+ */
+static void dm1105_gpio_set(struct dm1105_dev *dev, u32 mask)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ dm_setl(DM1105_GPIOVAL, mask & 0x0003ffff);
+
+}
+
+static void dm1105_gpio_clear(struct dm1105_dev *dev, u32 mask)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ dm_clearl(DM1105_GPIOVAL, mask & 0x0003ffff);
+
+}
+
+static void dm1105_gpio_andor(struct dm1105_dev *dev, u32 mask, u32 val)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ dm_andorl(DM1105_GPIOVAL, mask & 0x0003ffff, val);
+
+}
+
+static u32 dm1105_gpio_get(struct dm1105_dev *dev, u32 mask)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ return dm_readl(DM1105_GPIOVAL) & mask & 0x0003ffff;
+
+ return 0;
+}
+
+static void dm1105_gpio_enable(struct dm1105_dev *dev, u32 mask, int asoutput)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if ((mask & 0x0003ffff) && asoutput)
+ dm_clearl(DM1105_GPIOCTR, mask & 0x0003ffff);
+ else if ((mask & 0x0003ffff) && !asoutput)
+ dm_setl(DM1105_GPIOCTR, mask & 0x0003ffff);
+
+}
+
+static void dm1105_setline(struct dm1105_dev *dev, u32 line, int state)
+{
+ if (state)
+ dm1105_gpio_enable(dev, line, 0);
+ else {
+ dm1105_gpio_enable(dev, line, 1);
+ dm1105_gpio_clear(dev, line);
+ }
+}
+
+static void dm1105_setsda(void *data, int state)
+{
+ struct dm1105_dev *dev = data;
+
+ dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_sda, state);
+}
+
+static void dm1105_setscl(void *data, int state)
+{
+ struct dm1105_dev *dev = data;
+
+ dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_scl, state);
+}
+
+static int dm1105_getsda(void *data)
+{
+ struct dm1105_dev *dev = data;
+
+ return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_sda)
+ ? 1 : 0;
+}
+
+static int dm1105_getscl(void *data)
+{
+ struct dm1105_dev *dev = data;
+
+ return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_scl)
+ ? 1 : 0;
+}
+
static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
@@ -436,31 +592,20 @@ static inline struct dm1105_dev *frontend_to_dm1105_dev(struct dvb_frontend *fe)
static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct dm1105_dev *dev = frontend_to_dm1105_dev(fe);
- u32 lnb_mask, lnb_13v, lnb_18v, lnb_off;
- switch (dev->boardnr) {
- case DM1105_BOARD_AXESS_DM05:
- lnb_mask = DM05_LNB_MASK;
- lnb_off = DM05_LNB_OFF;
- lnb_13v = DM05_LNB_13V;
- lnb_18v = DM05_LNB_18V;
- break;
- case DM1105_BOARD_DVBWORLD_2002:
- case DM1105_BOARD_DVBWORLD_2004:
- default:
- lnb_mask = DM1105_LNB_MASK;
- lnb_off = DM1105_LNB_OFF;
- lnb_13v = DM1105_LNB_13V;
- lnb_18v = DM1105_LNB_18V;
- }
-
- dm_writel(DM1105_GPIOCTR, lnb_mask);
+ dm1105_gpio_enable(dev, dm1105_boards[dev->boardnr].lnb.mask, 1);
if (voltage == SEC_VOLTAGE_18)
- dm_writel(DM1105_GPIOVAL, lnb_18v);
+ dm1105_gpio_andor(dev,
+ dm1105_boards[dev->boardnr].lnb.mask,
+ dm1105_boards[dev->boardnr].lnb.v18);
else if (voltage == SEC_VOLTAGE_13)
- dm_writel(DM1105_GPIOVAL, lnb_13v);
+ dm1105_gpio_andor(dev,
+ dm1105_boards[dev->boardnr].lnb.mask,
+ dm1105_boards[dev->boardnr].lnb.v13);
else
- dm_writel(DM1105_GPIOVAL, lnb_off);
+ dm1105_gpio_andor(dev,
+ dm1105_boards[dev->boardnr].lnb.mask,
+ dm1105_boards[dev->boardnr].lnb.off);
return 0;
}
@@ -708,6 +853,38 @@ static int __devinit frontend_init(struct dm1105_dev *dev)
int ret;
switch (dev->boardnr) {
+ case DM1105_BOARD_UNBRANDED_I2C_ON_GPIO:
+ dm1105_gpio_enable(dev, GPIO15, 1);
+ dm1105_gpio_clear(dev, GPIO15);
+ msleep(100);
+ dm1105_gpio_set(dev, GPIO15);
+ msleep(200);
+ dev->fe = dvb_attach(
+ stv0299_attach, &sharp_z0194a_config,
+ &dev->i2c_bb_adap);
+ if (dev->fe) {
+ dev->fe->ops.set_voltage = dm1105_set_voltage;
+ dvb_attach(dvb_pll_attach, dev->fe, 0x60,
+ &dev->i2c_bb_adap, DVB_PLL_OPERA1);
+ break;
+ }
+
+ dev->fe = dvb_attach(
+ stv0288_attach, &earda_config,
+ &dev->i2c_bb_adap);
+ if (dev->fe) {
+ dev->fe->ops.set_voltage = dm1105_set_voltage;
+ dvb_attach(stb6000_attach, dev->fe, 0x61,
+ &dev->i2c_bb_adap);
+ break;
+ }
+
+ dev->fe = dvb_attach(
+ si21xx_attach, &serit_config,
+ &dev->i2c_bb_adap);
+ if (dev->fe)
+ dev->fe->ops.set_voltage = dm1105_set_voltage;
+ break;
case DM1105_BOARD_DVBWORLD_2004:
dev->fe = dvb_attach(
cx24116_attach, &serit_sp2633_config,
@@ -870,11 +1047,32 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
if (ret < 0)
goto err_dm1105_hw_exit;
+ i2c_set_adapdata(&dev->i2c_bb_adap, dev);
+ strcpy(dev->i2c_bb_adap.name, DM1105_I2C_GPIO_NAME);
+ dev->i2c_bb_adap.owner = THIS_MODULE;
+ dev->i2c_bb_adap.dev.parent = &pdev->dev;
+ dev->i2c_bb_adap.algo_data = &dev->i2c_bit;
+ dev->i2c_bit.data = dev;
+ dev->i2c_bit.setsda = dm1105_setsda;
+ dev->i2c_bit.setscl = dm1105_setscl;
+ dev->i2c_bit.getsda = dm1105_getsda;
+ dev->i2c_bit.getscl = dm1105_getscl;
+ dev->i2c_bit.udelay = 10;
+ dev->i2c_bit.timeout = 10;
+
+ /* Raise SCL and SDA */
+ dm1105_setsda(dev, 1);
+ dm1105_setscl(dev, 1);
+
+ ret = i2c_bit_add_bus(&dev->i2c_bb_adap);
+ if (ret < 0)
+ goto err_i2c_del_adapter;
+
/* dvb */
ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME,
THIS_MODULE, &pdev->dev, adapter_nr);
if (ret < 0)
- goto err_i2c_del_adapter;
+ goto err_i2c_del_adapters;
dvb_adapter = &dev->dvb_adapter;
@@ -952,6 +1150,8 @@ err_dvb_dmx_release:
dvb_dmx_release(dvbdemux);
err_dvb_unregister_adapter:
dvb_unregister_adapter(dvb_adapter);
+err_i2c_del_adapters:
+ i2c_del_adapter(&dev->i2c_bb_adap);
err_i2c_del_adapter:
i2c_del_adapter(&dev->i2c_adap);
err_dm1105_hw_exit:
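dm1105_setline() above emulates open-drain signalling for the bit-banged bus: releasing a line means turning the pin into an input so the external pull-up takes it high, while driving low means switching it to an output at 0. The same idea expressed with generic gpiolib calls (a sketch only; the dm1105 code pokes its GPIO registers directly rather than using gpiolib):

	#include <linux/gpio.h>

	/* sketch: open-drain emulation on a plain GPIO; 'gpio' is any valid number */
	static void example_od_setline(unsigned gpio, int state)
	{
		if (state)
			gpio_direction_input(gpio);	/* release: pull-up drives the line high */
		else
			gpio_direction_output(gpio, 0);	/* actively drive the line low */
	}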
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index f36f471deae2..37b146961ae2 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -207,17 +207,6 @@ static int lme2510_stream_restart(struct dvb_usb_device *d)
rbuff, sizeof(rbuff));
return ret;
}
-static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u32 keypress)
-{
- struct dvb_usb_device *d = adap->dev;
-
- deb_info(1, "INT Key Keypress =%04x", keypress);
-
- if (keypress > 0)
- rc_keydown(d->rc_dev, keypress, 0);
-
- return 0;
-}
static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
{
@@ -256,6 +245,7 @@ static void lme2510_int_response(struct urb *lme_urb)
struct lme2510_state *st = adap->dev->priv;
static u8 *ibuf, *rbuf;
int i = 0, offset;
+ u32 key;
switch (lme_urb->status) {
case 0:
@@ -282,10 +272,16 @@ static void lme2510_int_response(struct urb *lme_urb)
switch (ibuf[0]) {
case 0xaa:
- debug_data_snipet(1, "INT Remote data snipet in", ibuf);
- lme2510_remote_keypress(adap,
- (u32)(ibuf[2] << 24) + (ibuf[3] << 16) +
- (ibuf[4] << 8) + ibuf[5]);
+ debug_data_snipet(1, "INT Remote data snipet", ibuf);
+ if ((ibuf[4] + ibuf[5]) == 0xff) {
+ key = ibuf[5];
+ key += (ibuf[3] > 0)
+ ? (ibuf[3] ^ 0xff) << 8 : 0;
+ key += (ibuf[2] ^ 0xff) << 16;
+ deb_info(1, "INT Key =%08x", key);
+ if (adap->dev->rc_dev != NULL)
+ rc_keydown(adap->dev->rc_dev, key, 0);
+ }
break;
case 0xbb:
switch (st->tuner_config) {
@@ -691,45 +687,6 @@ static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return (ret < 0) ? -ENODEV : 0;
}
-static int lme2510_int_service(struct dvb_usb_adapter *adap)
-{
- struct dvb_usb_device *d = adap->dev;
- struct rc_dev *rc;
- int ret;
-
- info("STA Configuring Remote");
-
- rc = rc_allocate_device();
- if (!rc)
- return -ENOMEM;
-
- usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
- strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
-
- rc->input_name = "LME2510 Remote Control";
- rc->input_phys = d->rc_phys;
- rc->map_name = RC_MAP_LME2510;
- rc->driver_name = "LME 2510";
- usb_to_input_id(d->udev, &rc->input_id);
-
- ret = rc_register_device(rc);
- if (ret) {
- rc_free_device(rc);
- return ret;
- }
- d->rc_dev = rc;
-
- /* Start the Interrupt */
- ret = lme2510_int_read(adap);
- if (ret < 0) {
- rc_unregister_device(rc);
- info("INT Unable to start Interrupt Service");
- return -ENODEV;
- }
-
- return 0;
-}
-
static u8 check_sum(u8 *p, u8 len)
{
u8 sum = 0;
@@ -831,7 +788,7 @@ static int lme_firmware_switch(struct usb_device *udev, int cold)
cold_fw = !cold;
- if (udev->descriptor.idProduct == 0x1122) {
+ if (le16_to_cpu(udev->descriptor.idProduct) == 0x1122) {
switch (dvb_usb_lme2510_firmware) {
default:
dvb_usb_lme2510_firmware = TUNER_S0194;
@@ -1053,8 +1010,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
end: if (ret) {
- kfree(adap->fe);
- adap->fe = NULL;
+ if (adap->fe) {
+ dvb_frontend_detach(adap->fe);
+ adap->fe = NULL;
+ }
+ adap->dev->props.rc.core.rc_codes = NULL;
return -ENODEV;
}
@@ -1097,8 +1057,12 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- /* Start the Interrupt & Remote*/
- ret = lme2510_int_service(adap);
+ /* Start the Interrupt*/
+ ret = lme2510_int_read(adap);
+ if (ret < 0) {
+ info("INT Unable to start Interrupt Service");
+ return -ENODEV;
+ }
return ret;
}
@@ -1204,6 +1168,12 @@ static struct dvb_usb_device_properties lme2510_properties = {
}
}
},
+ .rc.core = {
+ .protocol = RC_TYPE_NEC,
+ .module_name = "LME2510 Remote Control",
+ .allowed_protos = RC_TYPE_NEC,
+ .rc_codes = RC_MAP_LME2510,
+ },
.power_ctrl = lme2510_powerup,
.identify_state = lme2510_identify_state,
.i2c_algo = &lme2510_i2c_algo,
@@ -1246,6 +1216,12 @@ static struct dvb_usb_device_properties lme2510c_properties = {
}
}
},
+ .rc.core = {
+ .protocol = RC_TYPE_NEC,
+ .module_name = "LME2510 Remote Control",
+ .allowed_protos = RC_TYPE_NEC,
+ .rc_codes = RC_MAP_LME2510,
+ },
.power_ctrl = lme2510_powerup,
.identify_state = lme2510_identify_state,
.i2c_algo = &lme2510_i2c_algo,
@@ -1269,19 +1245,21 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
adap->feedcount = 0;
}
- if (st->lme_urb != NULL) {
+ if (st->usb_buffer != NULL) {
st->i2c_talk_onoff = 1;
st->signal_lock = 0;
st->signal_level = 0;
st->signal_sn = 0;
buffer = st->usb_buffer;
+ }
+
+ if (st->lme_urb != NULL) {
usb_kill_urb(st->lme_urb);
usb_free_coherent(d->udev, 5000, st->buffer,
st->lme_urb->transfer_dma);
info("Interrupt Service Stopped");
- rc_unregister_device(d->rc_dev);
- info("Remote Stopped");
}
+
return buffer;
}
@@ -1293,7 +1271,8 @@ static void lme2510_exit(struct usb_interface *intf)
if (d != NULL) {
usb_buffer = lme2510_exit_int(d);
dvb_usb_device_exit(intf);
- kfree(usb_buffer);
+ if (usb_buffer != NULL)
+ kfree(usb_buffer);
}
}
@@ -1327,5 +1306,5 @@ module_exit(lme2510_module_exit);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.86");
+MODULE_VERSION("1.88");
MODULE_LICENSE("GPL");
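The 0xaa interrupt handling above folds the remote buffer into a single NEC-style scancode: a command byte plus the complemented address bytes, gated on a command/complement sanity check. The same composition as a standalone sketch (buf[] stands in for the relevant ibuf[] bytes):

	#include <linux/types.h>

	/* sketch of the scancode assembly done in lme2510_int_response() */
	static u32 example_lme2510_scancode(const u8 *buf)
	{
		u32 key;

		if (buf[4] + buf[5] != 0xff)		/* command/complement sanity check */
			return 0;

		key = buf[5];				/* command byte */
		if (buf[3] > 0)
			key += (buf[3] ^ 0xff) << 8;	/* complemented address, low byte */
		key += (buf[2] ^ 0xff) << 16;		/* complemented address, high byte */

		return key;
	}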
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c
index 2da55ec20392..d70eee00f33a 100644
--- a/drivers/media/dvb/frontends/stb0899_algo.c
+++ b/drivers/media/dvb/frontends/stb0899_algo.c
@@ -23,7 +23,7 @@
#include "stb0899_priv.h"
#include "stb0899_reg.h"
-inline u32 stb0899_do_div(u64 n, u32 d)
+static inline u32 stb0899_do_div(u64 n, u32 d)
{
/* wrap do_div() for ease of use */
diff --git a/drivers/media/dvb/frontends/tda8261.c b/drivers/media/dvb/frontends/tda8261.c
index 1742056a34e8..53c7d8f1df28 100644
--- a/drivers/media/dvb/frontends/tda8261.c
+++ b/drivers/media/dvb/frontends/tda8261.c
@@ -224,7 +224,6 @@ exit:
}
EXPORT_SYMBOL(tda8261_attach);
-MODULE_PARM_DESC(verbose, "Set verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 46cacf845049..459f7272d326 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = wl1273_fm_get_tx_ctune(radio);
+ ctrl->cur.val = wl1273_fm_get_tx_ctune(radio);
break;
default:
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index d50e5ac75ab6..87010724f914 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -191,7 +191,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = fm_tx_get_tune_cap_val(fmdev);
+ ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev);
break;
default:
fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 154c337f00fd..7d4bbc226d06 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -148,6 +148,18 @@ config IR_ITE_CIR
To compile this driver as a module, choose M here: the
module will be called ite-cir.
+config IR_FINTEK
+ tristate "Fintek Consumer Infrared Transceiver"
+ depends on PNP
+ depends on RC_CORE
+ ---help---
+ Say Y here to enable support for the integrated infrared
+ receiver/transceiver made by Fintek. This chip is found on assorted
+ Jetway motherboards (and possibly others).
+
+ To compile this driver as a module, choose M here: the
+ module will be called fintek-cir.
+
config IR_NUVOTON
tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 1f90a219a162..52830e5f4eaa 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
+obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
obj-$(CONFIG_IR_ENE) += ene_ir.o
obj-$(CONFIG_IR_REDRAT3) += redrat3.o
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
new file mode 100644
index 000000000000..8fa539dde1b4
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.c
@@ -0,0 +1,684 @@
+/*
+ * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
+ *
+ * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
+ *
+ * Special thanks to Fintek for providing hardware and spec sheets.
+ * This driver is based upon the nuvoton, ite and ene drivers for
+ * similar hardware.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pnp.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/rc-core.h>
+#include <linux/pci_ids.h>
+
+#include "fintek-cir.h"
+
+/* write val to config reg */
+static inline void fintek_cr_write(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+ fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
+ __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
+ outb(reg, fintek->cr_ip);
+ outb(val, fintek->cr_dp);
+}
+
+/* read val from config reg */
+static inline u8 fintek_cr_read(struct fintek_dev *fintek, u8 reg)
+{
+ u8 val;
+
+ outb(reg, fintek->cr_ip);
+ val = inb(fintek->cr_dp);
+
+ fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
+ __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
+ return val;
+}
+
+/* update config register bit without changing other bits */
+static inline void fintek_set_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+ u8 tmp = fintek_cr_read(fintek, reg) | val;
+ fintek_cr_write(fintek, tmp, reg);
+}
+
+/* clear config register bit without changing other bits */
+static inline void fintek_clear_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+ u8 tmp = fintek_cr_read(fintek, reg) & ~val;
+ fintek_cr_write(fintek, tmp, reg);
+}
+
+/* enter config mode */
+static inline void fintek_config_mode_enable(struct fintek_dev *fintek)
+{
+ /* Enabling Config Mode explicitly requires writing 2x */
+ outb(CONFIG_REG_ENABLE, fintek->cr_ip);
+ outb(CONFIG_REG_ENABLE, fintek->cr_ip);
+}
+
+/* exit config mode */
+static inline void fintek_config_mode_disable(struct fintek_dev *fintek)
+{
+ outb(CONFIG_REG_DISABLE, fintek->cr_ip);
+}
+
+/*
+ * When you want to address a specific logical device, write its logical
+ * device number to GCR_LOGICAL_DEV_NO
+ */
+static inline void fintek_select_logical_dev(struct fintek_dev *fintek, u8 ldev)
+{
+ fintek_cr_write(fintek, ldev, GCR_LOGICAL_DEV_NO);
+}
+
+/* write val to cir config register */
+static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 offset)
+{
+ outb(val, fintek->cir_addr + offset);
+}
+
+/* read val from cir config register */
+static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
+{
+ u8 val;
+
+ val = inb(fintek->cir_addr + offset);
+
+ return val;
+}
+
+#define pr_reg(text, ...) \
+ printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+/* dump current cir register contents */
+static void cir_dump_regs(struct fintek_dev *fintek)
+{
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+
+ pr_reg("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
+ pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
+ (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
+ fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO));
+ pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
+ fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
+
+ fintek_config_mode_disable(fintek);
+
+ pr_reg("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
+ pr_reg(" * STATUS: 0x%x\n", fintek_cir_reg_read(fintek, CIR_STATUS));
+ pr_reg(" * CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_CONTROL));
+ pr_reg(" * RX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_RX_DATA));
+ pr_reg(" * TX_CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
+ pr_reg(" * TX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_DATA));
+}
+
+/* detect hardware features */
+static int fintek_hw_detect(struct fintek_dev *fintek)
+{
+ unsigned long flags;
+ u8 chip_major, chip_minor;
+ u8 vendor_major, vendor_minor;
+ u8 portsel, ir_class;
+ u16 vendor;
+ int ret = 0;
+
+ fintek_config_mode_enable(fintek);
+
+ /* Check if we're using config port 0x4e or 0x2e */
+ portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
+ if (portsel == 0xff) {
+ fit_pr(KERN_INFO, "first portsel read was bunk, trying alt");
+ fintek_config_mode_disable(fintek);
+ fintek->cr_ip = CR_INDEX_PORT2;
+ fintek->cr_dp = CR_DATA_PORT2;
+ fintek_config_mode_enable(fintek);
+ portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
+ }
+ fit_dbg("portsel reg: 0x%02x", portsel);
+
+ ir_class = fintek_cir_reg_read(fintek, CIR_CR_CLASS);
+ fit_dbg("ir_class reg: 0x%02x", ir_class);
+
+ switch (ir_class) {
+ case CLASS_RX_2TX:
+ case CLASS_RX_1TX:
+ fintek->hw_tx_capable = true;
+ break;
+ case CLASS_RX_ONLY:
+ default:
+ fintek->hw_tx_capable = false;
+ break;
+ }
+
+ chip_major = fintek_cr_read(fintek, GCR_CHIP_ID_HI);
+ chip_minor = fintek_cr_read(fintek, GCR_CHIP_ID_LO);
+
+ vendor_major = fintek_cr_read(fintek, GCR_VENDOR_ID_HI);
+ vendor_minor = fintek_cr_read(fintek, GCR_VENDOR_ID_LO);
+ vendor = vendor_major << 8 | vendor_minor;
+
+ if (vendor != VENDOR_ID_FINTEK)
+ fit_pr(KERN_WARNING, "Unknown vendor ID: 0x%04x", vendor);
+ else
+ fit_dbg("Read Fintek vendor ID from chip");
+
+ fintek_config_mode_disable(fintek);
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ fintek->chip_major = chip_major;
+ fintek->chip_minor = chip_minor;
+ fintek->chip_vendor = vendor;
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+ return ret;
+}
+
+static void fintek_cir_ldev_init(struct fintek_dev *fintek)
+{
+ /* Select CIR logical device and enable */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+ /* Write allocated CIR address and IRQ information to hardware */
+ fintek_cr_write(fintek, fintek->cir_addr >> 8, CIR_CR_BASE_ADDR_HI);
+ fintek_cr_write(fintek, fintek->cir_addr & 0xff, CIR_CR_BASE_ADDR_LO);
+
+ fintek_cr_write(fintek, fintek->cir_irq, CIR_CR_IRQ_SEL);
+
+ fit_dbg("CIR initialized, base io address: 0x%lx, irq: %d (len: %d)",
+ fintek->cir_addr, fintek->cir_irq, fintek->cir_port_len);
+}
+
+/* enable CIR interrupts */
+static void fintek_enable_cir_irq(struct fintek_dev *fintek)
+{
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
+}
+
+static void fintek_cir_regs_init(struct fintek_dev *fintek)
+{
+ /* clear any and all stray interrupts */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+ /* and finally, enable interrupts */
+ fintek_enable_cir_irq(fintek);
+}
+
+static void fintek_enable_wake(struct fintek_dev *fintek)
+{
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI);
+
+ /* Allow CIR PME's to wake system */
+ fintek_set_reg_bit(fintek, ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG);
+ /* Enable CIR PME's */
+ fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG);
+ /* Clear CIR PME status register */
+ fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_CLR_REG);
+ /* Save state */
+ fintek_set_reg_bit(fintek, ACPI_STATE_CIR_BIT, LDEV_ACPI_STATE_REG);
+
+ fintek_config_mode_disable(fintek);
+}
+
+static int fintek_cmdsize(u8 cmd, u8 subcmd)
+{
+ int datasize = 0;
+
+ switch (cmd) {
+ case BUF_COMMAND_NULL:
+ if (subcmd == BUF_HW_CMD_HEADER)
+ datasize = 1;
+ break;
+ case BUF_HW_CMD_HEADER:
+ if (subcmd == BUF_CMD_G_REVISION)
+ datasize = 2;
+ break;
+ case BUF_COMMAND_HEADER:
+ switch (subcmd) {
+ case BUF_CMD_S_CARRIER:
+ case BUF_CMD_S_TIMEOUT:
+ case BUF_RSP_PULSE_COUNT:
+ datasize = 2;
+ break;
+ case BUF_CMD_SIG_END:
+ case BUF_CMD_S_TXMASK:
+ case BUF_CMD_S_RXSENSOR:
+ datasize = 1;
+ break;
+ }
+ }
+
+ return datasize;
+}
+
+/* process ir data stored in driver buffer */
+static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
+{
+ DEFINE_IR_RAW_EVENT(rawir);
+ u8 sample;
+ int i;
+
+ for (i = 0; i < fintek->pkts; i++) {
+ sample = fintek->buf[i];
+ switch (fintek->parser_state) {
+ case CMD_HEADER:
+ fintek->cmd = sample;
+ if ((fintek->cmd == BUF_COMMAND_HEADER) ||
+ ((fintek->cmd & BUF_COMMAND_MASK) !=
+ BUF_PULSE_BIT)) {
+ fintek->parser_state = SUBCMD;
+ continue;
+ }
+ fintek->rem = (fintek->cmd & BUF_LEN_MASK);
+ fit_dbg("%s: rem: 0x%02x", __func__, fintek->rem);
+ if (fintek->rem)
+ fintek->parser_state = PARSE_IRDATA;
+ else
+ ir_raw_event_reset(fintek->rdev);
+ break;
+ case SUBCMD:
+ fintek->rem = fintek_cmdsize(fintek->cmd, sample);
+ fintek->parser_state = CMD_DATA;
+ break;
+ case CMD_DATA:
+ fintek->rem--;
+ break;
+ case PARSE_IRDATA:
+ fintek->rem--;
+ init_ir_raw_event(&rawir);
+ rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
+ rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK)
+ * CIR_SAMPLE_PERIOD);
+
+ fit_dbg("Storing %s with duration %d",
+ rawir.pulse ? "pulse" : "space",
+ rawir.duration);
+ ir_raw_event_store_with_filter(fintek->rdev, &rawir);
+ break;
+ }
+
+ if ((fintek->parser_state != CMD_HEADER) && !fintek->rem)
+ fintek->parser_state = CMD_HEADER;
+ }
+
+ fintek->pkts = 0;
+
+ fit_dbg("Calling ir_raw_event_handle");
+ ir_raw_event_handle(fintek->rdev);
+}
+
+/* copy data from hardware rx register into driver buffer */
+static void fintek_get_rx_ir_data(struct fintek_dev *fintek, u8 rx_irqs)
+{
+ unsigned long flags;
+ u8 sample, status;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+
+ /*
+ * We must read data from CIR_RX_DATA until the hardware IR buffer
+ * is empty and clears the RX_TIMEOUT and/or RX_RECEIVE flags in
+ * the CIR_STATUS register
+ */
+ do {
+ sample = fintek_cir_reg_read(fintek, CIR_RX_DATA);
+ fit_dbg("%s: sample: 0x%02x", __func__, sample);
+
+ fintek->buf[fintek->pkts] = sample;
+ fintek->pkts++;
+
+ status = fintek_cir_reg_read(fintek, CIR_STATUS);
+ if (!(status & CIR_STATUS_IRQ_EN))
+ break;
+ } while (status & rx_irqs);
+
+ fintek_process_rx_ir_data(fintek);
+
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+}
+
+static void fintek_cir_log_irqs(u8 status)
+{
+ fit_pr(KERN_INFO, "IRQ 0x%02x:%s%s%s%s%s", status,
+ status & CIR_STATUS_IRQ_EN ? " IRQEN" : "",
+ status & CIR_STATUS_TX_FINISH ? " TXF" : "",
+ status & CIR_STATUS_TX_UNDERRUN ? " TXU" : "",
+ status & CIR_STATUS_RX_TIMEOUT ? " RXTO" : "",
+ status & CIR_STATUS_RX_RECEIVE ? " RXOK" : "");
+}
+
+/* interrupt service routine for incoming and outgoing CIR data */
+static irqreturn_t fintek_cir_isr(int irq, void *data)
+{
+ struct fintek_dev *fintek = data;
+ u8 status, rx_irqs;
+
+ fit_dbg_verbose("%s firing", __func__);
+
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_config_mode_disable(fintek);
+
+ /*
+ * Get IR Status register contents. Write 1 to ack/clear
+ *
+ * bit: reg name - description
+ * 3: TX_FINISH - TX is finished
+ * 2: TX_UNDERRUN - TX underrun
+ * 1: RX_TIMEOUT - RX data timeout
+ * 0: RX_RECEIVE - RX data received
+ */
+ status = fintek_cir_reg_read(fintek, CIR_STATUS);
+ if (!(status & CIR_STATUS_IRQ_MASK) || status == 0xff) {
+ fit_dbg_verbose("%s exiting, IRSTS 0x%02x", __func__, status);
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+ return IRQ_RETVAL(IRQ_NONE);
+ }
+
+ if (debug)
+ fintek_cir_log_irqs(status);
+
+ rx_irqs = status & (CIR_STATUS_RX_RECEIVE | CIR_STATUS_RX_TIMEOUT);
+ if (rx_irqs)
+ fintek_get_rx_ir_data(fintek, rx_irqs);
+
+ /* ack/clear all irq flags we've got */
+ fintek_cir_reg_write(fintek, status, CIR_STATUS);
+
+ fit_dbg_verbose("%s done", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+static void fintek_enable_cir(struct fintek_dev *fintek)
+{
+ /* set IRQ enabled */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
+
+ fintek_config_mode_enable(fintek);
+
+ /* enable the CIR logical device */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+
+ /* clear all pending interrupts */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+ /* enable interrupts */
+ fintek_enable_cir_irq(fintek);
+}
+
+static void fintek_disable_cir(struct fintek_dev *fintek)
+{
+ fintek_config_mode_enable(fintek);
+
+ /* disable the CIR logical device */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+}
+
+static int fintek_open(struct rc_dev *dev)
+{
+ struct fintek_dev *fintek = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ fintek_enable_cir(fintek);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+ return 0;
+}
+
+static void fintek_close(struct rc_dev *dev)
+{
+ struct fintek_dev *fintek = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ fintek_disable_cir(fintek);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+}
+
+/* Allocate memory, probe hardware, and initialize everything */
+static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+{
+ struct fintek_dev *fintek;
+ struct rc_dev *rdev;
+ int ret = -ENOMEM;
+
+ fintek = kzalloc(sizeof(struct fintek_dev), GFP_KERNEL);
+ if (!fintek)
+ return ret;
+
+ /* input device for IR remote (and tx) */
+ rdev = rc_allocate_device();
+ if (!rdev)
+ goto failure;
+
+ ret = -ENODEV;
+ /* validate pnp resources */
+ if (!pnp_port_valid(pdev, 0)) {
+ dev_err(&pdev->dev, "IR PNP Port not valid!\n");
+ goto failure;
+ }
+
+ if (!pnp_irq_valid(pdev, 0)) {
+ dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
+ goto failure;
+ }
+
+ fintek->cir_addr = pnp_port_start(pdev, 0);
+ fintek->cir_irq = pnp_irq(pdev, 0);
+ fintek->cir_port_len = pnp_port_len(pdev, 0);
+
+ fintek->cr_ip = CR_INDEX_PORT;
+ fintek->cr_dp = CR_DATA_PORT;
+
+ spin_lock_init(&fintek->fintek_lock);
+
+ ret = -EBUSY;
+ /* now claim resources */
+ if (!request_region(fintek->cir_addr,
+ fintek->cir_port_len, FINTEK_DRIVER_NAME))
+ goto failure;
+
+ if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
+ FINTEK_DRIVER_NAME, (void *)fintek))
+ goto failure;
+
+ pnp_set_drvdata(pdev, fintek);
+ fintek->pdev = pdev;
+
+ ret = fintek_hw_detect(fintek);
+ if (ret)
+ goto failure;
+
+ /* Initialize CIR & CIR Wake Logical Devices */
+ fintek_config_mode_enable(fintek);
+ fintek_cir_ldev_init(fintek);
+ fintek_config_mode_disable(fintek);
+
+ /* Initialize CIR & CIR Wake Config Registers */
+ fintek_cir_regs_init(fintek);
+
+ /* Set up the rc device */
+ rdev->priv = fintek;
+ rdev->driver_type = RC_DRIVER_IR_RAW;
+ rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->open = fintek_open;
+ rdev->close = fintek_close;
+ rdev->input_name = FINTEK_DESCRIPTION;
+ rdev->input_phys = "fintek/cir0";
+ rdev->input_id.bustype = BUS_HOST;
+ rdev->input_id.vendor = VENDOR_ID_FINTEK;
+ rdev->input_id.product = fintek->chip_major;
+ rdev->input_id.version = fintek->chip_minor;
+ rdev->dev.parent = &pdev->dev;
+ rdev->driver_name = FINTEK_DRIVER_NAME;
+ rdev->map_name = RC_MAP_RC6_MCE;
+ rdev->timeout = US_TO_NS(1000);
+ /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
+ rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto failure;
+
+ device_init_wakeup(&pdev->dev, true);
+ fintek->rdev = rdev;
+ fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+ if (debug)
+ cir_dump_regs(fintek);
+
+ return 0;
+
+failure:
+ if (fintek->cir_irq)
+ free_irq(fintek->cir_irq, fintek);
+ if (fintek->cir_addr)
+ release_region(fintek->cir_addr, fintek->cir_port_len);
+
+ rc_free_device(rdev);
+ kfree(fintek);
+
+ return ret;
+}
+
+static void __devexit fintek_remove(struct pnp_dev *pdev)
+{
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ /* disable CIR */
+ fintek_disable_cir(fintek);
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+ /* enable CIR Wake (for IR power-on) */
+ fintek_enable_wake(fintek);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+ /* free resources */
+ free_irq(fintek->cir_irq, fintek);
+ release_region(fintek->cir_addr, fintek->cir_port_len);
+
+ rc_unregister_device(fintek->rdev);
+
+ kfree(fintek);
+}
+
+static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
+{
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+
+ fit_dbg("%s called", __func__);
+
+ /* disable all CIR interrupts */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+ fintek_config_mode_enable(fintek);
+
+ /* disable cir logical dev */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+
+ /* make sure wake is enabled */
+ fintek_enable_wake(fintek);
+
+ return 0;
+}
+
+static int fintek_resume(struct pnp_dev *pdev)
+{
+ int ret = 0;
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+
+ fit_dbg("%s called", __func__);
+
+ /* open interrupt */
+ fintek_enable_cir_irq(fintek);
+
+ /* Enable CIR logical device */
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+
+ fintek_cir_regs_init(fintek);
+
+ return ret;
+}
+
+static void fintek_shutdown(struct pnp_dev *pdev)
+{
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+ fintek_enable_wake(fintek);
+}
+
+static const struct pnp_device_id fintek_ids[] = {
+ { "FIT0002", 0 }, /* CIR */
+ { "", 0 },
+};
+
+static struct pnp_driver fintek_driver = {
+ .name = FINTEK_DRIVER_NAME,
+ .id_table = fintek_ids,
+ .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
+ .probe = fintek_probe,
+ .remove = __devexit_p(fintek_remove),
+ .suspend = fintek_suspend,
+ .resume = fintek_resume,
+ .shutdown = fintek_shutdown,
+};
+
+int fintek_init(void)
+{
+ return pnp_register_driver(&fintek_driver);
+}
+
+void fintek_exit(void)
+{
+ pnp_unregister_driver(&fintek_driver);
+}
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging output");
+
+MODULE_DEVICE_TABLE(pnp, fintek_ids);
+MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver");
+
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(fintek_init);
+module_exit(fintek_exit);
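The driver above talks to the Super I/O chip through the classic index/data port pair: write the register index to the index port, then read or write the value through the data port, bracketed by the enter/exit keys. A compact sketch of that access pattern, using the 0x2e/0x2f ports and 0x87/0xaa keys defined in fintek-cir.h (illustrative only, not the driver's own helpers):

	#include <linux/io.h>
	#include <linux/types.h>

	/* sketch: generic Super I/O config-register read via index/data ports */
	static u8 example_sio_read(u16 index_port, u16 data_port, u8 reg)
	{
		u8 val;

		outb(0x87, index_port);		/* enter config mode (key written twice) */
		outb(0x87, index_port);
		outb(reg, index_port);		/* select the config register */
		val = inb(data_port);		/* read its value */
		outb(0xaa, index_port);		/* exit config mode */

		return val;
	}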
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
new file mode 100644
index 000000000000..1b10b2011f5e
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.h
@@ -0,0 +1,243 @@
+/*
+ * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
+ *
+ * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
+ *
+ * Special thanks to Fintek for providing hardware and spec sheets.
+ * This driver is based upon the nuvoton, ite and ene drivers for
+ * similar hardware.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/spinlock.h>
+#include <linux/ioctl.h>
+
+/* platform driver name to register */
+#define FINTEK_DRIVER_NAME "fintek-cir"
+#define FINTEK_DESCRIPTION "Fintek LPC SuperIO Consumer IR Transceiver"
+#define VENDOR_ID_FINTEK 0x1934
+
+
+/* debugging module parameter */
+static int debug;
+
+#define fit_pr(level, text, ...) \
+ printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+#define fit_dbg(text, ...) \
+ if (debug) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define fit_dbg_verbose(text, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define fit_dbg_wake(text, ...) \
+ if (debug > 2) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+
+#define TX_BUF_LEN 256
+#define RX_BUF_LEN 32
+
+struct fintek_dev {
+ struct pnp_dev *pdev;
+ struct rc_dev *rdev;
+
+ spinlock_t fintek_lock;
+
+ /* for rx */
+ u8 buf[RX_BUF_LEN];
+ unsigned int pkts;
+
+ struct {
+ spinlock_t lock;
+ u8 buf[TX_BUF_LEN];
+ unsigned int buf_count;
+ unsigned int cur_buf_num;
+ wait_queue_head_t queue;
+ } tx;
+
+ /* Config register index/data port pair */
+ u8 cr_ip;
+ u8 cr_dp;
+
+ /* hardware I/O settings */
+ unsigned long cir_addr;
+ int cir_irq;
+ int cir_port_len;
+
+ /* hardware id */
+ u8 chip_major;
+ u8 chip_minor;
+ u16 chip_vendor;
+
+ /* hardware features */
+ bool hw_learning_capable;
+ bool hw_tx_capable;
+
+ /* rx settings */
+ bool learning_enabled;
+ bool carrier_detect_enabled;
+
+ enum {
+ CMD_HEADER = 0,
+ SUBCMD,
+ CMD_DATA,
+ PARSE_IRDATA,
+ } parser_state;
+
+ u8 cmd, rem;
+
+ /* carrier period = 1 / frequency */
+ u32 carrier;
+};
+
+/* buffer packet constants, largely identical to mceusb.c */
+#define BUF_PULSE_BIT 0x80
+#define BUF_LEN_MASK 0x1f
+#define BUF_SAMPLE_MASK 0x7f
+
+#define BUF_COMMAND_HEADER 0x9f
+#define BUF_COMMAND_MASK 0xe0
+#define BUF_COMMAND_NULL 0x00
+#define BUF_HW_CMD_HEADER 0xff
+#define BUF_CMD_G_REVISION 0x0b
+#define BUF_CMD_S_CARRIER 0x06
+#define BUF_CMD_S_TIMEOUT 0x0c
+#define BUF_CMD_SIG_END 0x01
+#define BUF_CMD_S_TXMASK 0x08
+#define BUF_CMD_S_RXSENSOR 0x14
+#define BUF_RSP_PULSE_COUNT 0x15
+
+#define CIR_SAMPLE_PERIOD 50
+
+/*
+ * Configuration Register:
+ * Index Port
+ * Data Port
+ */
+#define CR_INDEX_PORT 0x2e
+#define CR_DATA_PORT 0x2f
+
+/* Possible alternate values, depends on how the chip is wired */
+#define CR_INDEX_PORT2 0x4e
+#define CR_DATA_PORT2 0x4f
+
+/*
+ * GCR_CONFIG_PORT_SEL bit 4 specifies which Index Port value is
+ * active. 1 = 0x4e, 0 = 0x2e
+ */
+#define PORT_SEL_PORT_4E_EN 0x10
+
+/* Extended Function Mode enable/disable magic values */
+#define CONFIG_REG_ENABLE 0x87
+#define CONFIG_REG_DISABLE 0xaa
+
+/* Chip IDs found in CR_CHIP_ID_{HI,LO} */
+#define CHIP_ID_HIGH_F71809U 0x04
+#define CHIP_ID_LOW_F71809U 0x08
+
+/*
+ * Global control regs we need to care about:
+ * Global Control def.
+ * Register name addr val. */
+#define GCR_SOFTWARE_RESET 0x02 /* 0x00 */
+#define GCR_LOGICAL_DEV_NO 0x07 /* 0x00 */
+#define GCR_CHIP_ID_HI 0x20 /* 0x04 */
+#define GCR_CHIP_ID_LO 0x21 /* 0x08 */
+#define GCR_VENDOR_ID_HI 0x23 /* 0x19 */
+#define GCR_VENDOR_ID_LO 0x24 /* 0x34 */
+#define GCR_CONFIG_PORT_SEL 0x25 /* 0x01 */
+#define GCR_KBMOUSE_WAKEUP 0x27
+
+#define LOGICAL_DEV_DISABLE 0x00
+#define LOGICAL_DEV_ENABLE 0x01
+
+/* Logical device number of the CIR function */
+#define LOGICAL_DEV_CIR 0x05
+
+/* CIR Logical Device (LDN 0x08) config registers */
+#define CIR_CR_COMMAND_INDEX 0x04
+#define CIR_CR_IRCS 0x05 /* Before the host writes a command to the IR, it
+ must set this to 1. When the host finishes
+ writing the command, it must clear this to 0. */
+#define CIR_CR_COMMAND_DATA 0x06 /* Host reads or writes command data */
+#define CIR_CR_CLASS 0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx,
+ 0x33 = rx + 1 tx */
+#define CIR_CR_DEV_EN 0x30 /* bit0 = 1 enables CIR */
+#define CIR_CR_BASE_ADDR_HI 0x60 /* MSB of CIR IO base addr */
+#define CIR_CR_BASE_ADDR_LO 0x61 /* LSB of CIR IO base addr */
+#define CIR_CR_IRQ_SEL 0x70 /* bits3-0 store CIR IRQ */
+#define CIR_CR_PSOUT_STATUS 0xf1
+#define CIR_CR_WAKE_KEY3_ADDR 0xf8
+#define CIR_CR_WAKE_KEY3_CODE 0xf9
+#define CIR_CR_WAKE_KEY3_DC 0xfa
+#define CIR_CR_WAKE_CONTROL 0xfb
+#define CIR_CR_WAKE_KEY12_ADDR 0xfc
+#define CIR_CR_WAKE_KEY4_ADDR 0xfd
+#define CIR_CR_WAKE_KEY5_ADDR 0xfe
+
+#define CLASS_RX_ONLY 0xff
+#define CLASS_RX_2TX 0x66
+#define CLASS_RX_1TX 0x33
+
+/* CIR device registers */
+#define CIR_STATUS 0x00
+#define CIR_RX_DATA 0x01
+#define CIR_TX_CONTROL 0x02
+#define CIR_TX_DATA 0x03
+#define CIR_CONTROL 0x04
+
+/* Bits to enable CIR wake */
+#define LOGICAL_DEV_ACPI 0x01
+#define LDEV_ACPI_WAKE_EN_REG 0xe8
+#define ACPI_WAKE_EN_CIR_BIT 0x04
+
+#define LDEV_ACPI_PME_EN_REG 0xf0
+#define LDEV_ACPI_PME_CLR_REG 0xf1
+#define ACPI_PME_CIR_BIT 0x02
+
+#define LDEV_ACPI_STATE_REG 0xf4
+#define ACPI_STATE_CIR_BIT 0x20
+
+/*
+ * CIR status register (0x00):
+ * 7 - CIR_IRQ_EN (1 = enable CIR IRQ, 0 = disable)
+ * 3 - TX_FINISH (1 when TX finished, write 1 to clear)
+ * 2 - TX_UNDERRUN (1 on TX underrun, write 1 to clear)
+ * 1 - RX_TIMEOUT (1 on RX timeout, write 1 to clear)
+ * 0 - RX_RECEIVE (1 on RX receive, write 1 to clear)
+ */
+#define CIR_STATUS_IRQ_EN 0x80
+#define CIR_STATUS_TX_FINISH 0x08
+#define CIR_STATUS_TX_UNDERRUN 0x04
+#define CIR_STATUS_RX_TIMEOUT 0x02
+#define CIR_STATUS_RX_RECEIVE 0x01
+#define CIR_STATUS_IRQ_MASK 0x0f
+
+/*
+ * CIR TX control register (0x02):
+ * 7 - TX_START (1 to indicate TX start, auto-cleared when done)
+ * 6 - TX_END (1 to indicate TX data written to TX fifo)
+ */
+#define CIR_TX_CONTROL_TX_START 0x80
+#define CIR_TX_CONTROL_TX_END 0x40
+
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index afae14fd152e..129d3f9a461d 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -14,81 +14,81 @@
static struct rc_map_table lme2510_rc[] = {
/* Type 1 - 26 buttons */
- { 0xef12ba45, KEY_0 },
- { 0xef12a05f, KEY_1 },
- { 0xef12af50, KEY_2 },
- { 0xef12a25d, KEY_3 },
- { 0xef12be41, KEY_4 },
- { 0xef12f50a, KEY_5 },
- { 0xef12bd42, KEY_6 },
- { 0xef12b847, KEY_7 },
- { 0xef12b649, KEY_8 },
- { 0xef12fa05, KEY_9 },
- { 0xef12bc43, KEY_POWER },
- { 0xef12b946, KEY_SUBTITLE },
- { 0xef12f906, KEY_PAUSE },
- { 0xef12fc03, KEY_MEDIA_REPEAT},
- { 0xef12fd02, KEY_PAUSE },
- { 0xef12a15e, KEY_VOLUMEUP },
- { 0xef12a35c, KEY_VOLUMEDOWN },
- { 0xef12f609, KEY_CHANNELUP },
- { 0xef12e51a, KEY_CHANNELDOWN },
- { 0xef12e11e, KEY_PLAY },
- { 0xef12e41b, KEY_ZOOM },
- { 0xef12a659, KEY_MUTE },
- { 0xef12a55a, KEY_TV },
- { 0xef12e718, KEY_RECORD },
- { 0xef12f807, KEY_EPG },
- { 0xef12fe01, KEY_STOP },
+ { 0x10ed45, KEY_0 },
+ { 0x10ed5f, KEY_1 },
+ { 0x10ed50, KEY_2 },
+ { 0x10ed5d, KEY_3 },
+ { 0x10ed41, KEY_4 },
+ { 0x10ed0a, KEY_5 },
+ { 0x10ed42, KEY_6 },
+ { 0x10ed47, KEY_7 },
+ { 0x10ed49, KEY_8 },
+ { 0x10ed05, KEY_9 },
+ { 0x10ed43, KEY_POWER },
+ { 0x10ed46, KEY_SUBTITLE },
+ { 0x10ed06, KEY_PAUSE },
+ { 0x10ed03, KEY_MEDIA_REPEAT},
+ { 0x10ed02, KEY_PAUSE },
+ { 0x10ed5e, KEY_VOLUMEUP },
+ { 0x10ed5c, KEY_VOLUMEDOWN },
+ { 0x10ed09, KEY_CHANNELUP },
+ { 0x10ed1a, KEY_CHANNELDOWN },
+ { 0x10ed1e, KEY_PLAY },
+ { 0x10ed1b, KEY_ZOOM },
+ { 0x10ed59, KEY_MUTE },
+ { 0x10ed5a, KEY_TV },
+ { 0x10ed18, KEY_RECORD },
+ { 0x10ed07, KEY_EPG },
+ { 0x10ed01, KEY_STOP },
/* Type 2 - 20 buttons */
- { 0xff40ea15, KEY_0 },
- { 0xff40f708, KEY_1 },
- { 0xff40f609, KEY_2 },
- { 0xff40f50a, KEY_3 },
- { 0xff40f30c, KEY_4 },
- { 0xff40f20d, KEY_5 },
- { 0xff40f10e, KEY_6 },
- { 0xff40ef10, KEY_7 },
- { 0xff40ee11, KEY_8 },
- { 0xff40ed12, KEY_9 },
- { 0xff40ff00, KEY_POWER },
- { 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */
- { 0xff40e51a, KEY_PAUSE }, /* Timeshift */
- { 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
- { 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
- { 0xff40fe01, KEY_CHANNELUP },
- { 0xff40fa05, KEY_CHANNELDOWN },
- { 0xff40eb14, KEY_ZOOM },
- { 0xff40e718, KEY_RECORD },
- { 0xff40e916, KEY_STOP },
+ { 0xbf15, KEY_0 },
+ { 0xbf08, KEY_1 },
+ { 0xbf09, KEY_2 },
+ { 0xbf0a, KEY_3 },
+ { 0xbf0c, KEY_4 },
+ { 0xbf0d, KEY_5 },
+ { 0xbf0e, KEY_6 },
+ { 0xbf10, KEY_7 },
+ { 0xbf11, KEY_8 },
+ { 0xbf12, KEY_9 },
+ { 0xbf00, KEY_POWER },
+ { 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
+ { 0xbf1a, KEY_PAUSE }, /* Timeshift */
+ { 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+ { 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+ { 0xbf01, KEY_CHANNELUP },
+ { 0xbf05, KEY_CHANNELDOWN },
+ { 0xbf14, KEY_ZOOM },
+ { 0xbf18, KEY_RECORD },
+ { 0xbf16, KEY_STOP },
/* Type 3 - 20 buttons */
- { 0xff00e31c, KEY_0 },
- { 0xff00f807, KEY_1 },
- { 0xff00ea15, KEY_2 },
- { 0xff00f609, KEY_3 },
- { 0xff00e916, KEY_4 },
- { 0xff00e619, KEY_5 },
- { 0xff00f20d, KEY_6 },
- { 0xff00f30c, KEY_7 },
- { 0xff00e718, KEY_8 },
- { 0xff00a15e, KEY_9 },
- { 0xff00ba45, KEY_POWER },
- { 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */
- { 0xff00b54a, KEY_PAUSE }, /* Timeshift */
- { 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
- { 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
- { 0xff00b946, KEY_CHANNELUP },
- { 0xff00bf40, KEY_CHANNELDOWN },
- { 0xff00f708, KEY_ZOOM },
- { 0xff00bd42, KEY_RECORD },
- { 0xff00a55a, KEY_STOP },
+ { 0x1c, KEY_0 },
+ { 0x07, KEY_1 },
+ { 0x15, KEY_2 },
+ { 0x09, KEY_3 },
+ { 0x16, KEY_4 },
+ { 0x19, KEY_5 },
+ { 0x0d, KEY_6 },
+ { 0x0c, KEY_7 },
+ { 0x18, KEY_8 },
+ { 0x5e, KEY_9 },
+ { 0x45, KEY_POWER },
+ { 0x44, KEY_MEDIA_REPEAT}, /* Recall */
+ { 0x4a, KEY_PAUSE }, /* Timeshift */
+ { 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+ { 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+ { 0x46, KEY_CHANNELUP },
+ { 0x40, KEY_CHANNELDOWN },
+ { 0x08, KEY_ZOOM },
+ { 0x42, KEY_RECORD },
+ { 0x5a, KEY_STOP },
};
static struct rc_map_list lme2510_map = {
.map = {
.scan = lme2510_rc,
.size = ARRAY_SIZE(lme2510_rc),
- .rc_type = RC_TYPE_UNKNOWN,
+ .rc_type = RC_TYPE_NEC,
.name = RC_MAP_LME2510,
}
};
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 3be180b3ba27..bb53de7fe408 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -687,7 +687,7 @@ config VIDEO_HEXIUM_GEMINI
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_V4L2 && I2C && DMADEVICES
select DMA_ENGINE
select TIMB_DMA
select VIDEO_ADV7180
@@ -757,6 +757,8 @@ config VIDEO_NOON010PC30
---help---
This driver supports NOON010PC30 CIF camera from Siliconfile
+source "drivers/media/video/m5mols/Kconfig"
+
config VIDEO_OMAP3
tristate "OMAP 3 Camera support (EXPERIMENTAL)"
select OMAP_IOMMU
@@ -952,7 +954,7 @@ config VIDEO_SAMSUNG_S5P_FIMC
config VIDEO_S5P_MIPI_CSIS
tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
- depends on VIDEO_V4L2 && PM_RUNTIME && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
---help---
This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver.
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 9519160c2e01..f0fecd6f6a33 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
+obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 0073a8c55336..40eb6326e48a 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -438,7 +438,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
strcat(vc->card, " (676/");
break;
default:
- strcat(vc->card, " (???/");
+ strcat(vc->card, " (XXX/");
break;
}
switch (cam->params.version.sensor_flags) {
@@ -458,7 +458,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
strcat(vc->card, "500)");
break;
default:
- strcat(vc->card, "???)");
+ strcat(vc->card, "XXX)");
break;
}
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 66671a4092e4..26fc206f095e 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -34,7 +34,7 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver");
MODULE_LICENSE("GPL");
-#ifdef DEBUG
+#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK |
D_USBI | D_USBO | D_V4L2;
#endif
diff --git a/drivers/media/video/m5mols/Kconfig b/drivers/media/video/m5mols/Kconfig
new file mode 100644
index 000000000000..302dc3d70193
--- /dev/null
+++ b/drivers/media/video/m5mols/Kconfig
@@ -0,0 +1,5 @@
+config VIDEO_M5MOLS
+ tristate "Fujitsu M-5MOLS 8MP sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This driver supports the Fujitsu M-5MOLS camera sensor with ISP.
diff --git a/drivers/media/video/m5mols/Makefile b/drivers/media/video/m5mols/Makefile
new file mode 100644
index 000000000000..0a44e028edc7
--- /dev/null
+++ b/drivers/media/video/m5mols/Makefile
@@ -0,0 +1,3 @@
+m5mols-objs := m5mols_core.o m5mols_controls.o m5mols_capture.o
+
+obj-$(CONFIG_VIDEO_M5MOLS) += m5mols.o
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
new file mode 100644
index 000000000000..10b55c854487
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -0,0 +1,296 @@
+/*
+ * Header for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef M5MOLS_H
+#define M5MOLS_H
+
+#include <media/v4l2-subdev.h>
+#include "m5mols_reg.h"
+
+extern int m5mols_debug;
+
+#define to_m5mols(__sd) container_of(__sd, struct m5mols_info, sd)
+
+#define to_sd(__ctrl) \
+ (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd)
+
+enum m5mols_restype {
+ M5MOLS_RESTYPE_MONITOR,
+ M5MOLS_RESTYPE_CAPTURE,
+ M5MOLS_RESTYPE_MAX,
+};
+
+/**
+ * struct m5mols_resolution - structure for the resolution
+ * @type: resolution type according to the pixel code
+ * @width: width of the resolution
+ * @height: height of the resolution
+ * @reg: resolution preset register value
+ */
+struct m5mols_resolution {
+ u8 reg;
+ enum m5mols_restype type;
+ u16 width;
+ u16 height;
+};
+
+/**
+ * struct m5mols_exif - structure for the EXIF information of M-5MOLS
+ * @exposure_time: exposure time register value
+ * @shutter_speed: speed of the shutter register value
+ * @aperture: aperture register value
+ * @brightness: brightness register value
+ * @exposure_bias: also known as the EV bias
+ * @iso_speed: ISO register value
+ * @flash: status register value of the flash
+ * @sdr: status register value of the Subject Distance Range
+ * @qval: register value whose exact meaning is not documented
+ */
+struct m5mols_exif {
+ u32 exposure_time;
+ u32 shutter_speed;
+ u32 aperture;
+ u32 brightness;
+ u32 exposure_bias;
+ u16 iso_speed;
+ u16 flash;
+ u16 sdr;
+ u16 qval;
+};
+
+/**
+ * struct m5mols_capture - Structure for the capture capability
+ * @exif: EXIF information
+ * @main: size in bytes of the main image
+ * @thumb: size in bytes of the thumbnail image, if present
+ * @total: total size in bytes of the produced image
+ */
+struct m5mols_capture {
+ struct m5mols_exif exif;
+ u32 main;
+ u32 thumb;
+ u32 total;
+};
+
+/**
+ * struct m5mols_scenemode - structure for the scenemode capability
+ * @metering: metering light register value
+ * @ev_bias: EV bias register value
+ * @wb_mode: white balance mode (Auto or Manual)
+ * @wb_preset: white balance preset register value for the Manual mode
+ * @chroma_en: register value indicating whether the Chroma capability is enabled
+ * @chroma_lvl: chroma level register value
+ * @edge_en: register value indicating whether the Edge capability is enabled
+ * @edge_lvl: edge level register value
+ * @af_range: Auto Focus range
+ * @fd_mode: Face Detection mode
+ * @mcc: Multi-axis Color Conversion, the so-called emotion color
+ * @light: status of the Light
+ * @flash: status of the Flash
+ * @tone: tone color, i.e. contrast
+ * @iso: ISO register value
+ * @capt_mode: image stabilization mode used while capturing
+ * @wdr: Wide Dynamic Range register value
+ *
+ * The values for each scene mode follow the recommendations in the documentation.
+ */
+struct m5mols_scenemode {
+ u32 metering;
+ u32 ev_bias;
+ u32 wb_mode;
+ u32 wb_preset;
+ u32 chroma_en;
+ u32 chroma_lvl;
+ u32 edge_en;
+ u32 edge_lvl;
+ u32 af_range;
+ u32 fd_mode;
+ u32 mcc;
+ u32 light;
+ u32 flash;
+ u32 tone;
+ u32 iso;
+ u32 capt_mode;
+ u32 wdr;
+};
+
+/**
+ * struct m5mols_version - firmware version information
+ * @customer: customer information
+ * @project: version of project information according to customer
+ * @fw: firmware revision
+ * @hw: hardware revision
+ * @param: version of the parameter
+ * @awb: Auto WhiteBalance algorithm version
+ * @str: information about manufacturer and packaging vendor
+ * @af: Auto Focus version
+ *
+ * The register offsets run from the customer version at 0x00 to the AWB
+ * version at 0x09. The customer and project fields occupy 1 byte each,
+ * while the fw, hw, param and awb fields require 2 bytes each. The str field
+ * is a unique string associated with the firmware version; it includes
+ * information about the manufacturer and the vendor of the sensor's
+ * packaging. The first 2 bytes of the string identify the packaging
+ * manufacturer.
+ */
+#define VERSION_STRING_SIZE 22
+struct m5mols_version {
+ u8 customer;
+ u8 project;
+ u16 fw;
+ u16 hw;
+ u16 param;
+ u16 awb;
+ u8 str[VERSION_STRING_SIZE];
+ u8 af;
+};
+#define VERSION_SIZE sizeof(struct m5mols_version)
+
+/**
+ * struct m5mols_info - M-5MOLS driver data structure
+ * @pdata: platform data
+ * @sd: v4l-subdev instance
+ * @pad: media pad
+ * @ffmt: current fmt according to resolution type
+ * @res_type: current resolution type
+ * @code: current code
+ * @irq_waitq: waitqueue for the capture
+ * @work_irq: workqueue for the IRQ
+ * @flags: state variable for the interrupt handler
+ * @handle: control handler
+ * @autoexposure: Auto Exposure control
+ * @exposure: Exposure control
+ * @autowb: Auto White Balance control
+ * @colorfx: Color effect control
+ * @saturation: Saturation control
+ * @zoom: Zoom control
+ * @ver: information of the version
+ * @cap: the capture mode attributes
+ * @power: current sensor's power status
+ * @ctrl_sync: true means all controls of the sensor are initialized
+ * @lock_ae: true means the Auto Exposure is locked
+ * @lock_awb: true means the Auto White Balance is locked
+ * @resolution: register value for current resolution
+ * @interrupt: register value for current interrupt status
+ * @mode: register value for current operation mode
+ * @mode_save: register value for current operation mode for saving
+ * @set_power: optional power callback to the board code
+ */
+struct m5mols_info {
+ const struct m5mols_platform_data *pdata;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
+ int res_type;
+ enum v4l2_mbus_pixelcode code;
+ wait_queue_head_t irq_waitq;
+ struct work_struct work_irq;
+ unsigned long flags;
+
+ struct v4l2_ctrl_handler handle;
+ /* Autoexposure/exposure control cluster */
+ struct {
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+ };
+ struct v4l2_ctrl *autowb;
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *zoom;
+
+ struct m5mols_version ver;
+ struct m5mols_capture cap;
+ bool power;
+ bool ctrl_sync;
+ bool lock_ae;
+ bool lock_awb;
+ u8 resolution;
+ u32 interrupt;
+ u32 mode;
+ u32 mode_save;
+ int (*set_power)(struct device *dev, int on);
+};
+
+#define ST_CAPT_IRQ 0
+
+#define is_powered(__info) (__info->power)
+#define is_ctrl_synced(__info) (__info->ctrl_sync)
+#define is_available_af(__info) (__info->ver.af)
+#define is_code(__code, __type) (__code == m5mols_default_ffmt[__type].code)
+#define is_manufacturer(__info, __manufacturer) \
+ (__info->ver.str[0] == __manufacturer[0] && \
+ __info->ver.str[1] == __manufacturer[1])
+/*
+ * I2C operation of the M-5MOLS
+ *
+ * The I2C read operation of the M-5MOLS requires 2 messages. The first
+ * message sends the information about the command, command category, and total
+ * message size. The second message is used to retrieve the data specified in
+ * the first message.
+ *
+ * 1st message 2nd message
+ * +-------+---+----------+-----+-------+ +------+------+------+------+
+ * | size1 | R | category | cmd | size2 | | d[0] | d[1] | d[2] | d[3] |
+ * +-------+---+----------+-----+-------+ +------+------+------+------+
+ * - size1: message data size (5 in this case)
+ * - size2: desired buffer size of the 2nd message
+ * - d[0..3]: according to size2
+ *
+ * The I2C write operation needs just one message. The message includes
+ * category, command, total size, and desired data.
+ *
+ * 1st message
+ * +-------+---+----------+-----+------+------+------+------+
+ * | size1 | W | category | cmd | d[0] | d[1] | d[2] | d[3] |
+ * +-------+---+----------+-----+------+------+------+------+
+ * - d[0..3]: according to size1
+ */
+int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
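+
+/*
+ * Illustrative sketch only, not part of the driver's command set: assuming a
+ * valid struct v4l2_subdev pointer 'sd', the one-byte SYSTEM mode register
+ * could be read and written as
+ *
+ *	u32 mode;
+ *	int ret = m5mols_read(sd, SYSTEM_SYSMODE, &mode);
+ *	if (!ret)
+ *		ret = m5mols_write(sd, SYSTEM_SYSMODE, REG_MONITOR);
+ *
+ * where SYSTEM_SYSMODE is I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1), i.e.
+ * category 0x00, command 0x0b, one data byte (see m5mols_reg.h).
+ */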
+
+/*
+ * Mode operation of the M-5MOLS
+ *
+ * Changing the mode of the M-5MOLS requires a specific execution order.
+ * There are three modes (PARAMETER, MONITOR, CAPTURE) which can be selected
+ * by the user. There are various categories associated with each mode.
+ *
+ * +============================================================+
+ * | mode | category |
+ * +============================================================+
+ * | FLASH | FLASH(only after Stand-by or Power-on) |
+ * | SYSTEM | SYSTEM(only after sensor arm-booting) |
+ * | PARAMETER | PARAMETER |
+ * | MONITOR | MONITOR(preview), Auto Focus, Face Detection |
+ * | CAPTURE | Single CAPTURE, Preview(recording) |
+ * +============================================================+
+ *
+ * The allowed transitions between the modes are as follows:
+ * PARAMETER <---> MONITOR <---> CAPTURE
+ */
+int m5mols_mode(struct m5mols_info *info, u32 mode);
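+
+/*
+ * For illustration only (not a required call sequence): switching from
+ * PARAMETER to CAPTURE goes through MONITOR, which m5mols_mode() handles
+ * internally, so a single call such as
+ *
+ *	ret = m5mols_mode(info, REG_CAPTURE);
+ *
+ * first enters REG_MONITOR and then REG_CAPTURE when the current mode is
+ * REG_PARAMETER.
+ */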
+
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_sync_controls(struct m5mols_info *info);
+int m5mols_start_capture(struct m5mols_info *info);
+int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_lock_3a(struct m5mols_info *info, bool lock);
+int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
+
+/* The firmware function */
+int m5mols_update_fw(struct v4l2_subdev *sd,
+ int (*set_power)(struct m5mols_info *, bool));
+
+#endif /* M5MOLS_H */
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
new file mode 100644
index 000000000000..d71a3903b60f
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -0,0 +1,191 @@
+/*
+ * The Capture code for Fujitsu M-5MOLS ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/m5mols.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+static int m5mols_capture_error_handler(struct m5mols_info *info,
+ int timeout)
+{
+ int ret;
+
+ /* Disable all interrupts and clear relevant interrupt status bits */
+ ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE,
+ info->interrupt & ~(REG_INT_CAPTURE));
+ if (ret)
+ return ret;
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * m5mols_read_rational - I2C read of a rational number
+ *
+ * Read numerator and denominator from registers @addr_num and @addr_den
+ * respectively and return the division result in @val.
+ */
+static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
+ u32 addr_den, u32 *val)
+{
+ u32 num, den;
+
+ int ret = m5mols_read(sd, addr_num, &num);
+ if (!ret)
+ ret = m5mols_read(sd, addr_den, &den);
+ if (ret)
+ return ret;
+ *val = den == 0 ? 0 : num / den;
+ return ret;
+}
+
+/**
+ * m5mols_capture_info - Gather captured image information
+ *
+ * For now it gathers only EXIF information and file size.
+ */
+static int m5mols_capture_info(struct m5mols_info *info)
+{
+ struct m5mols_exif *exif = &info->cap.exif;
+ struct v4l2_subdev *sd = &info->sd;
+ int ret;
+
+ ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU,
+ EXIF_INFO_EXPTIME_DE, &exif->exposure_time);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE,
+ &exif->shutter_speed);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE,
+ &exif->aperture);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE,
+ &exif->brightness);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE,
+ &exif->exposure_bias);
+ if (ret)
+ return ret;
+
+ ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+ if (!ret)
+ ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+ if (!ret)
+ ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+ if (!ret)
+ ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+ if (ret)
+ return ret;
+
+ if (!ret)
+ ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+ if (!ret)
+ ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+ if (!ret)
+ info->cap.total = info->cap.main + info->cap.thumb;
+
+ return ret;
+}
+
+int m5mols_start_capture(struct m5mols_info *info)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ u32 resolution = info->resolution;
+ int timeout = 0;
+ int ret;
+
+ /*
+ * Preparing capture. Setting control & interrupt before entering
+ * capture mode
+ *
+ * 1) change to MONITOR mode for operating control & interrupt
+ * 2) set controls (considering v4l2_control value & lock 3A)
+ * 3) set interrupt
+ * 4) change to CAPTURE mode
+ */
+ ret = m5mols_mode(info, REG_MONITOR);
+ if (!ret)
+ ret = m5mols_sync_controls(info);
+ if (!ret)
+ ret = m5mols_lock_3a(info, true);
+ if (!ret)
+ ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
+ if (!ret)
+ ret = m5mols_mode(info, REG_CAPTURE);
+ if (!ret) {
+ /* Wait for capture interrupt, after changing capture mode */
+ timeout = wait_event_interruptible_timeout(info->irq_waitq,
+ test_bit(ST_CAPT_IRQ, &info->flags),
+ msecs_to_jiffies(2000));
+ if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags))
+ ret = m5mols_capture_error_handler(info, timeout);
+ }
+ if (!ret)
+ ret = m5mols_lock_3a(info, false);
+ if (ret)
+ return ret;
+ /*
+ * Starting capture. Set the capture frame count, resolution and
+ * format (available formats: JPEG, Bayer RAW, YUV).
+ *
+ * 1) select single or multi (up to 25) capture, format, size
+ * 2) set interrupt
+ * 3) start capture (currently the main image only)
+ * 4) get information
+ * 5) notify the file size to the v4l2 device (e.g. the s5p-fimc v4l2 device)
+ */
+ ret = m5mols_write(sd, CAPC_SEL_FRAME, 1);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
+ if (!ret)
+ ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
+ if (!ret)
+ ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN);
+ if (!ret) {
+ /* Wait for the capture completion interrupt */
+ timeout = wait_event_interruptible_timeout(info->irq_waitq,
+ test_bit(ST_CAPT_IRQ, &info->flags),
+ msecs_to_jiffies(2000));
+ if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) {
+ ret = m5mols_capture_info(info);
+ if (!ret)
+ v4l2_subdev_notify(sd, 0, &info->cap.total);
+ }
+ }
+
+ return m5mols_capture_error_handler(info, timeout);
+}
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
new file mode 100644
index 000000000000..817c16fec368
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -0,0 +1,299 @@
+/*
+ * Controls for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+static struct m5mols_scenemode m5mols_default_scenemode[] = {
+ [REG_SCENE_NORMAL] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_NORMAL, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 5, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_PORTRAIT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 4,
+ REG_AF_NORMAL, BIT_FD_EN | BIT_FD_DRAW_FACE_FRAME,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_LANDSCAPE] = {
+ REG_AE_ALL, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 4, REG_EDGE_ON, 6,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_SPORTS] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_PARTY_INDOOR] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_200, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_BEACH_SNOW] = {
+ REG_AE_CENTER, REG_AE_INDEX_10_POS, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_SUNSET] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
+ REG_AWB_DAYLIGHT,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_DAWN_DUSK] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
+ REG_AWB_FLUORESCENT_1,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_FALL] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 5, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_NIGHT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_AGAINST_LIGHT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_FIRE] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_TEXT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 7,
+ REG_AF_MACRO, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_ANTI_SHAKE, REG_WDR_ON,
+ },
+ [REG_SCENE_CANDLE] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+};
+
+/**
+ * m5mols_do_scenemode() - Change current scenemode
+ * @mode: Desired mode of the scenemode
+ *
+ * WARNING: The execution order is important. Do not change the order.
+ */
+int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
+ int ret;
+
+ if (mode > REG_SCENE_CANDLE)
+ return -EINVAL;
+
+ ret = m5mols_lock_3a(info, false);
+ if (!ret)
+ ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode);
+ if (!ret)
+ ret = m5mols_write(sd, AE_EV_PRESET_CAPTURE, mode);
+ if (!ret)
+ ret = m5mols_write(sd, AE_MODE, scenemode.metering);
+ if (!ret)
+ ret = m5mols_write(sd, AE_INDEX, scenemode.ev_bias);
+ if (!ret)
+ ret = m5mols_write(sd, AWB_MODE, scenemode.wb_mode);
+ if (!ret)
+ ret = m5mols_write(sd, AWB_MANUAL, scenemode.wb_preset);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CHROMA_EN, scenemode.chroma_en);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CHROMA_LVL, scenemode.chroma_lvl);
+ if (!ret)
+ ret = m5mols_write(sd, MON_EDGE_EN, scenemode.edge_en);
+ if (!ret)
+ ret = m5mols_write(sd, MON_EDGE_LVL, scenemode.edge_lvl);
+ if (!ret && is_available_af(info))
+ ret = m5mols_write(sd, AF_MODE, scenemode.af_range);
+ if (!ret && is_available_af(info))
+ ret = m5mols_write(sd, FD_CTL, scenemode.fd_mode);
+ if (!ret)
+ ret = m5mols_write(sd, MON_TONE_CTL, scenemode.tone);
+ if (!ret)
+ ret = m5mols_write(sd, AE_ISO, scenemode.iso);
+ if (!ret)
+ ret = m5mols_mode(info, REG_CAPTURE);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_MCC_MODE, scenemode.mcc);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_LIGHT_CTRL, scenemode.light);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_FLASH_CTRL, scenemode.flash);
+ if (!ret)
+ ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode);
+ if (!ret)
+ ret = m5mols_mode(info, REG_MONITOR);
+
+ return ret;
+}
+
+static int m5mols_lock_ae(struct m5mols_info *info, bool lock)
+{
+ int ret = 0;
+
+ if (info->lock_ae != lock)
+ ret = m5mols_write(&info->sd, AE_LOCK,
+ lock ? REG_AE_LOCK : REG_AE_UNLOCK);
+ if (!ret)
+ info->lock_ae = lock;
+
+ return ret;
+}
+
+static int m5mols_lock_awb(struct m5mols_info *info, bool lock)
+{
+ int ret = 0;
+
+ if (info->lock_awb != lock)
+ ret = m5mols_write(&info->sd, AWB_LOCK,
+ lock ? REG_AWB_LOCK : REG_AWB_UNLOCK);
+ if (!ret)
+ info->lock_awb = lock;
+
+ return ret;
+}
+
+/* m5mols_lock_3a() - Lock 3A (Auto Exposure, Auto White Balance, Auto Focus) */
+int m5mols_lock_3a(struct m5mols_info *info, bool lock)
+{
+ int ret;
+
+ ret = m5mols_lock_ae(info, lock);
+ if (!ret)
+ ret = m5mols_lock_awb(info, lock);
+ /* Don't need to handle unlocking AF */
+ if (!ret && is_available_af(info) && lock)
+ ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
+
+ return ret;
+}
+
+/* m5mols_set_ctrl() - The main s_ctrl function called by m5mols_s_ctrl() */
+int m5mols_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ return m5mols_write(sd, MON_ZOOM, ctrl->val);
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = m5mols_lock_ae(info,
+ ctrl->val == V4L2_EXPOSURE_AUTO ? false : true);
+ if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO)
+ ret = m5mols_write(sd, AE_MODE, REG_AE_ALL);
+ if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) {
+ int val = info->exposure->val;
+ ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
+ if (!ret)
+ ret = m5mols_write(sd, AE_MAN_GAIN_MON, val);
+ if (!ret)
+ ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val);
+ }
+ return ret;
+
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = m5mols_lock_awb(info, ctrl->val ? false : true);
+ if (!ret)
+ ret = m5mols_write(sd, AWB_MODE, ctrl->val ?
+ REG_AWB_AUTO : REG_AWB_PRESET);
+ return ret;
+
+ case V4L2_CID_SATURATION:
+ ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON);
+ return ret;
+
+ case V4L2_CID_COLORFX:
+ /*
+ * This control uses two kinds of registers: normal & color.
+ * The normal effect belongs to category 1, while the color
+ * one belongs to category 2.
+ *
+ * The normal effect uses one register: CAT1_EFFECT.
+ * The color effect uses three registers:
+ * CAT2_COLOR_EFFECT, CAT2_CFIXR, CAT2_CFIXB.
+ */
+ ret = m5mols_write(sd, PARM_EFFECT,
+ ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA :
+ ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS :
+ REG_EFFECT_OFF);
+ if (!ret)
+ ret = m5mols_write(sd, MON_EFFECT,
+ ctrl->val == V4L2_COLORFX_SEPIA ?
+ REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CFIXR,
+ ctrl->val == V4L2_COLORFX_SEPIA ?
+ REG_CFIXR_SEPIA : 0);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CFIXB,
+ ctrl->val == V4L2_COLORFX_SEPIA ?
+ REG_CFIXB_SEPIA : 0);
+ return ret;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
new file mode 100644
index 000000000000..76eac26e84ae
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -0,0 +1,1004 @@
+/*
+ * Driver for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/m5mols.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+int m5mols_debug;
+module_param(m5mols_debug, int, 0644);
+
+#define MODULE_NAME "M5MOLS"
+#define M5MOLS_I2C_CHECK_RETRY 500
+
+/* The regulator consumer names for external voltage regulators */
+static struct regulator_bulk_data supplies[] = {
+ {
+ .supply = "core", /* ARM core power, 1.2V */
+ }, {
+ .supply = "dig_18", /* digital power 1, 1.8V */
+ }, {
+ .supply = "d_sensor", /* sensor power 1, 1.8V */
+ }, {
+ .supply = "dig_28", /* digital power 2, 2.8V */
+ }, {
+ .supply = "a_sensor", /* analog power */
+ }, {
+ .supply = "dig_12", /* digital power 3, 1.2V */
+ },
+};
+
+static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = {
+ [M5MOLS_RESTYPE_MONITOR] = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
+ [M5MOLS_RESTYPE_CAPTURE] = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_JPEG_1X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
+};
+#define SIZE_DEFAULT_FFMT ARRAY_SIZE(m5mols_default_ffmt)
+
+static const struct m5mols_resolution m5mols_reg_res[] = {
+ { 0x01, M5MOLS_RESTYPE_MONITOR, 128, 96 }, /* SUB-QCIF */
+ { 0x03, M5MOLS_RESTYPE_MONITOR, 160, 120 }, /* QQVGA */
+ { 0x05, M5MOLS_RESTYPE_MONITOR, 176, 144 }, /* QCIF */
+ { 0x06, M5MOLS_RESTYPE_MONITOR, 176, 176 },
+ { 0x08, M5MOLS_RESTYPE_MONITOR, 240, 320 }, /* QVGA */
+ { 0x09, M5MOLS_RESTYPE_MONITOR, 320, 240 }, /* QVGA */
+ { 0x0c, M5MOLS_RESTYPE_MONITOR, 240, 400 }, /* WQVGA */
+ { 0x0d, M5MOLS_RESTYPE_MONITOR, 400, 240 }, /* WQVGA */
+ { 0x0e, M5MOLS_RESTYPE_MONITOR, 352, 288 }, /* CIF */
+ { 0x13, M5MOLS_RESTYPE_MONITOR, 480, 360 },
+ { 0x15, M5MOLS_RESTYPE_MONITOR, 640, 360 }, /* qHD */
+ { 0x17, M5MOLS_RESTYPE_MONITOR, 640, 480 }, /* VGA */
+ { 0x18, M5MOLS_RESTYPE_MONITOR, 720, 480 },
+ { 0x1a, M5MOLS_RESTYPE_MONITOR, 800, 480 }, /* WVGA */
+ { 0x1f, M5MOLS_RESTYPE_MONITOR, 800, 600 }, /* SVGA */
+ { 0x21, M5MOLS_RESTYPE_MONITOR, 1280, 720 }, /* HD */
+ { 0x25, M5MOLS_RESTYPE_MONITOR, 1920, 1080 }, /* 1080p */
+ { 0x29, M5MOLS_RESTYPE_MONITOR, 3264, 2448 }, /* 2.63fps 8M */
+ { 0x39, M5MOLS_RESTYPE_MONITOR, 800, 602 }, /* AHS_MON debug */
+
+ { 0x02, M5MOLS_RESTYPE_CAPTURE, 320, 240 }, /* QVGA */
+ { 0x04, M5MOLS_RESTYPE_CAPTURE, 400, 240 }, /* WQVGA */
+ { 0x07, M5MOLS_RESTYPE_CAPTURE, 480, 360 },
+ { 0x08, M5MOLS_RESTYPE_CAPTURE, 640, 360 }, /* qHD */
+ { 0x09, M5MOLS_RESTYPE_CAPTURE, 640, 480 }, /* VGA */
+ { 0x0a, M5MOLS_RESTYPE_CAPTURE, 800, 480 }, /* WVGA */
+ { 0x10, M5MOLS_RESTYPE_CAPTURE, 1280, 720 }, /* HD */
+ { 0x14, M5MOLS_RESTYPE_CAPTURE, 1280, 960 }, /* 1M */
+ { 0x17, M5MOLS_RESTYPE_CAPTURE, 1600, 1200 }, /* 2M */
+ { 0x19, M5MOLS_RESTYPE_CAPTURE, 1920, 1080 }, /* Full-HD */
+ { 0x1a, M5MOLS_RESTYPE_CAPTURE, 2048, 1152 }, /* 3Mega */
+ { 0x1b, M5MOLS_RESTYPE_CAPTURE, 2048, 1536 },
+ { 0x1c, M5MOLS_RESTYPE_CAPTURE, 2560, 1440 }, /* 4Mega */
+ { 0x1d, M5MOLS_RESTYPE_CAPTURE, 2560, 1536 },
+ { 0x1f, M5MOLS_RESTYPE_CAPTURE, 2560, 1920 }, /* 5Mega */
+ { 0x21, M5MOLS_RESTYPE_CAPTURE, 3264, 1836 }, /* 6Mega */
+ { 0x22, M5MOLS_RESTYPE_CAPTURE, 3264, 1960 },
+ { 0x25, M5MOLS_RESTYPE_CAPTURE, 3264, 2448 }, /* 8Mega */
+};
+
+/**
+ * m5mols_swap_byte - byte array to integer conversion function
+ * @data: byte array read over I2C
+ * @length: size in bytes of the I2C packet, as defined in the M-5MOLS datasheet
+ *
+ * Convert an I2C data byte array, performing any required byte
+ * reordering to assure proper values for each data type, regardless
+ * of the architecture endianness.
+ */
+static u32 m5mols_swap_byte(u8 *data, u8 length)
+{
+ if (length == 1)
+ return *data;
+ else if (length == 2)
+ return be16_to_cpu(*((u16 *)data));
+ else
+ return be32_to_cpu(*((u32 *)data));
+}
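+
+/*
+ * For example (illustrative only): a two-byte register read returning the
+ * buffer { 0x01, 0x02 } is converted by m5mols_swap_byte(data, 2) to the
+ * value 0x0102, regardless of the host endianness.
+ */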
+
+/**
+ * m5mols_read - I2C read function
+ * @reg: combination of size, category and command for the I2C packet
+ * @val: read value
+ */
+int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
+ u8 size = I2C_SIZE(reg);
+ u8 category = I2C_CATEGORY(reg);
+ u8 cmd = I2C_COMMAND(reg);
+ struct i2c_msg msg[2];
+ u8 wbuf[5];
+ int ret;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ if (size != 1 && size != 2 && size != 4) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 5;
+ msg[0].buf = wbuf;
+ wbuf[0] = 5;
+ wbuf[1] = M5MOLS_BYTE_READ;
+ wbuf[2] = category;
+ wbuf[3] = cmd;
+ wbuf[4] = size;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = size + 1;
+ msg[1].buf = rbuf;
+
+ /* minimum stabilization time */
+ usleep_range(200, 200);
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0) {
+ v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n",
+ size, category, cmd, ret);
+ return ret;
+ }
+
+ *val = m5mols_swap_byte(&rbuf[1], size);
+
+ return 0;
+}
+
+/**
+ * m5mols_write - I2C command write function
+ * @reg: combination of size, category and command for the I2C packet
+ * @val: value to write
+ */
+int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4];
+ u8 category = I2C_CATEGORY(reg);
+ u8 cmd = I2C_COMMAND(reg);
+ u8 size = I2C_SIZE(reg);
+ u32 *buf = (u32 *)&wbuf[4];
+ struct i2c_msg msg[1];
+ int ret;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ if (size != 1 && size != 2 && size != 4) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ msg->addr = client->addr;
+ msg->flags = 0;
+ msg->len = (u16)size + 4;
+ msg->buf = wbuf;
+ wbuf[0] = size + 4;
+ wbuf[1] = M5MOLS_BYTE_WRITE;
+ wbuf[2] = category;
+ wbuf[3] = cmd;
+
+ *buf = m5mols_swap_byte((u8 *)&val, size);
+
+ usleep_range(200, 200);
+
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0) {
+ v4l2_err(sd, "write failed: size:%d cat:%02x cmd:%02x. %d\n",
+ size, category, cmd, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+{
+ u32 busy, i;
+ int ret;
+
+ for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
+ ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+ if (ret < 0)
+ return ret;
+ if ((busy & mask) == mask)
+ return 0;
+ }
+ return -EBUSY;
+}
+
+/**
+ * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
+ *
+ * Before writing desired interrupt value the INT_FACTOR register should
+ * be read to clear pending interrupts.
+ */
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ u32 mask = is_available_af(info) ? REG_INT_AF : 0;
+ u32 dummy;
+ int ret;
+
+ ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+ if (!ret)
+ ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
+ return ret;
+}
+
+/**
+ * m5mols_reg_mode - Write the mode and check busy status
+ *
+ * Changing the M-5MOLS mode always takes a little time, so the busy status
+ * must be checked to guarantee that the requested mode has been entered.
+ */
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+{
+ int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
+
+ return ret ? ret : m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, mode);
+}
+
+/**
+ * m5mols_mode - manage the M-5MOLS's mode
+ * @mode: the required operation mode
+ *
+ * The commands of the M-5MOLS are grouped into specific modes. Each function
+ * is guaranteed to work only when the sensor is operating in the mode which
+ * the command belongs to.
+ */
+int m5mols_mode(struct m5mols_info *info, u32 mode)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ int ret = -EINVAL;
+ u32 reg;
+
+ if (mode < REG_PARAMETER || mode > REG_CAPTURE)
+ return ret;
+
+ ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+ if ((!ret && reg == mode) || ret)
+ return ret;
+
+ switch (reg) {
+ case REG_PARAMETER:
+ ret = m5mols_reg_mode(sd, REG_MONITOR);
+ if (!ret && mode == REG_MONITOR)
+ break;
+ if (!ret)
+ ret = m5mols_reg_mode(sd, REG_CAPTURE);
+ break;
+
+ case REG_MONITOR:
+ if (mode == REG_PARAMETER) {
+ ret = m5mols_reg_mode(sd, REG_PARAMETER);
+ break;
+ }
+
+ ret = m5mols_reg_mode(sd, REG_CAPTURE);
+ break;
+
+ case REG_CAPTURE:
+ ret = m5mols_reg_mode(sd, REG_MONITOR);
+ if (!ret && mode == REG_MONITOR)
+ break;
+ if (!ret)
+ ret = m5mols_reg_mode(sd, REG_PARAMETER);
+ break;
+
+ default:
+ v4l2_warn(sd, "Wrong mode: %d\n", mode);
+ }
+
+ if (!ret)
+ info->mode = mode;
+
+ return ret;
+}
+
+/**
+ * m5mols_get_version - retrieve full revision information of the M-5MOLS
+ *
+ * The version information includes revisions of hardware and firmware,
+ * the Auto Focus algorithm version and the version string.
+ */
+static int m5mols_get_version(struct v4l2_subdev *sd)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ union {
+ struct m5mols_version ver;
+ u8 bytes[VERSION_SIZE];
+ } version;
+ u32 *value;
+ u8 cmd = CAT0_VER_CUSTOMER;
+ int ret;
+
+ do {
+ value = (u32 *)&version.bytes[cmd];
+ ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
+ if (ret)
+ return ret;
+ } while (cmd++ != CAT0_VER_AWB);
+
+ do {
+ value = (u32 *)&version.bytes[cmd];
+ ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
+ if (ret)
+ return ret;
+ if (cmd >= VERSION_SIZE - 1)
+ return -EINVAL;
+ } while (version.bytes[cmd++]);
+
+ value = (u32 *)&version.bytes[cmd];
+ ret = m5mols_read(sd, AF_VERSION, value);
+ if (ret)
+ return ret;
+
+ /* store version information swapped for being readable */
+ info->ver = version.ver;
+ info->ver.fw = be16_to_cpu(info->ver.fw);
+ info->ver.hw = be16_to_cpu(info->ver.hw);
+ info->ver.param = be16_to_cpu(info->ver.param);
+ info->ver.awb = be16_to_cpu(info->ver.awb);
+
+ v4l2_info(sd, "Manufacturer\t[%s]\n",
+ is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
+ "Samsung Electro-Machanics" :
+ is_manufacturer(info, REG_SAMSUNG_OPTICS) ?
+ "Samsung Fiber-Optics" :
+ is_manufacturer(info, REG_SAMSUNG_TECHWIN) ?
+ "Samsung Techwin" : "None");
+ v4l2_info(sd, "Customer/Project\t[0x%02x/0x%02x]\n",
+ info->ver.customer, info->ver.project);
+
+ if (!is_available_af(info))
+ v4l2_info(sd, "No support Auto Focus on this firmware\n");
+
+ return ret;
+}
+
+/**
+ * __find_restype - Lookup M-5MOLS resolution type according to pixel code
+ * @code: pixel code
+ */
+static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code)
+{
+ enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR;
+
+ do {
+ if (code == m5mols_default_ffmt[type].code)
+ return type;
+ } while (++type != SIZE_DEFAULT_FFMT);
+
+ return 0;
+}
+
+/**
+ * __find_resolution - Lookup preset and type of M-5MOLS's resolution
+ * @mf: pixel format to find/negotiate the resolution preset for
+ * @type: M-5MOLS resolution type
+ * @resolution: M-5MOLS resolution preset register value
+ *
+ * Find nearest resolution matching resolution preset and adjust mf
+ * to supported values.
+ */
+static int __find_resolution(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf,
+ enum m5mols_restype *type,
+ u32 *resolution)
+{
+ const struct m5mols_resolution *fsize = &m5mols_reg_res[0];
+ const struct m5mols_resolution *match = NULL;
+ enum m5mols_restype stype = __find_restype(mf->code);
+ int i = ARRAY_SIZE(m5mols_reg_res);
+ unsigned int min_err = ~0;
+
+ while (i--) {
+ int err;
+ if (stype == fsize->type) {
+ err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ }
+ fsize++;
+ }
+ if (match) {
+ mf->width = match->width;
+ mf->height = match->height;
+ *resolution = match->reg;
+ *type = stype;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
+ struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which,
+ enum m5mols_restype type)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
+
+ return &info->ffmt[type];
+}
+
+static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (fmt->pad != 0)
+ return -EINVAL;
+
+ format = __find_format(info, fh, fmt->which, info->res_type);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ struct v4l2_mbus_framefmt *format = &fmt->format;
+ struct v4l2_mbus_framefmt *sfmt;
+ enum m5mols_restype type;
+ u32 resolution = 0;
+ int ret;
+
+ if (fmt->pad != 0)
+ return -EINVAL;
+
+ ret = __find_resolution(sd, format, &type, &resolution);
+ if (ret < 0)
+ return ret;
+
+ sfmt = __find_format(info, fh, fmt->which, type);
+ if (!sfmt)
+ return 0;
+
+ *sfmt = m5mols_default_ffmt[type];
+ sfmt->width = format->width;
+ sfmt->height = format->height;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ info->resolution = resolution;
+ info->code = format->code;
+ info->res_type = type;
+ }
+
+ return 0;
+}
+
+static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (!code || code->index >= SIZE_DEFAULT_FFMT)
+ return -EINVAL;
+
+ code->code = m5mols_default_ffmt[code->index].code;
+
+ return 0;
+}
+
+static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
+ .enum_mbus_code = m5mols_enum_mbus_code,
+ .get_fmt = m5mols_get_fmt,
+ .set_fmt = m5mols_set_fmt,
+};
+
+/**
+ * m5mols_sync_controls - Apply default scene mode and the current controls
+ *
+ * This is used only while streaming, for syncing between the v4l2_ctrl
+ * framework and the m5mols controls. First apply the default scene mode to
+ * the sensor, then call v4l2_ctrl_handler_setup(). Some of the default
+ * v4l2_ctrls may set the same registers as the scene mode does; such controls
+ * are applied after the scene mode so that they take priority over it.
+ */
+int m5mols_sync_controls(struct m5mols_info *info)
+{
+ int ret = -EINVAL;
+
+ if (!is_ctrl_synced(info)) {
+ ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_setup(&info->handle);
+ info->ctrl_sync = true;
+ }
+
+ return ret;
+}
+
+/**
+ * m5mols_start_monitor - Start the monitor mode
+ *
+ * Before applying the controls setup the resolution and frame rate
+ * in PARAMETER mode, and then switch over to MONITOR mode.
+ */
+static int m5mols_start_monitor(struct m5mols_info *info)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ int ret;
+
+ ret = m5mols_mode(info, REG_PARAMETER);
+ if (!ret)
+ ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution);
+ if (!ret)
+ ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30);
+ if (!ret)
+ ret = m5mols_mode(info, REG_MONITOR);
+ if (!ret)
+ ret = m5mols_sync_controls(info);
+
+ return ret;
+}
+
+static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+
+ if (enable) {
+ int ret = -EINVAL;
+
+ if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+ ret = m5mols_start_monitor(info);
+ if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+ ret = m5mols_start_capture(info);
+
+ return ret;
+ }
+
+ return m5mols_mode(info, REG_PARAMETER);
+}
+
+static const struct v4l2_subdev_video_ops m5mols_video_ops = {
+ .s_stream = m5mols_s_stream,
+};
+
+static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret;
+
+ info->mode_save = info->mode;
+
+ ret = m5mols_mode(info, REG_PARAMETER);
+ if (!ret)
+ ret = m5mols_set_ctrl(ctrl);
+ if (!ret)
+ ret = m5mols_mode(info, info->mode_save);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
+ .s_ctrl = m5mols_s_ctrl,
+};
+
+static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct m5mols_platform_data *pdata = info->pdata;
+ int ret;
+
+ if (enable) {
+ if (is_powered(info))
+ return 0;
+
+ if (info->set_power) {
+ ret = info->set_power(&client->dev, 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+ info->set_power(&client->dev, 0);
+ return ret;
+ }
+
+ gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity);
+ usleep_range(1000, 1000);
+ info->power = true;
+
+ return ret;
+ }
+
+ if (!is_powered(info))
+ return 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ return ret;
+
+ if (info->set_power)
+ info->set_power(&client->dev, 0);
+
+ gpio_set_value(pdata->gpio_reset, pdata->reset_polarity);
+ usleep_range(1000, 1000);
+ info->power = false;
+
+ return ret;
+}
+
+/* m5mols_update_fw - optional firmware update routine */
+int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
+ int (*set_power)(struct m5mols_info *, bool))
+{
+ return 0;
+}
+
+/**
+ * m5mols_sensor_armboot - Boot the M-5MOLS internal ARM core
+ *
+ * Booting the internal ARM core makes the M-5MOLS ready to accept commands
+ * over I2C. It is the first thing to be done after power up. After issuing
+ * the ARM boot command the driver must wait at least the 520ms recommended
+ * by the M-5MOLS datasheet.
+ */
+static int m5mols_sensor_armboot(struct v4l2_subdev *sd)
+{
+ int ret;
+
+ ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT);
+ if (ret < 0)
+ return ret;
+
+ msleep(520);
+
+ ret = m5mols_get_version(sd);
+ if (!ret)
+ ret = m5mols_update_fw(sd, m5mols_sensor_power);
+ if (ret)
+ return ret;
+
+ v4l2_dbg(1, m5mols_debug, sd, "Success ARM Booting\n");
+
+ ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI);
+ if (!ret)
+ ret = m5mols_enable_interrupt(sd, REG_INT_AF);
+
+ return ret;
+}
+
+static int m5mols_init_controls(struct m5mols_info *info)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ u16 max_exposure;
+ u16 step_zoom;
+ int ret;
+
+ /* Determine the value range & step of the controls for various FW versions */
+ ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+ if (!ret)
+ step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(&info->handle, 6);
+ info->autowb = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 0);
+ info->saturation = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_SATURATION,
+ 1, 5, 1, 3);
+ info->zoom = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE,
+ 1, 70, step_zoom, 1);
+ info->exposure = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
+ 0, max_exposure, 1, (int)max_exposure/2);
+ info->colorfx = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_COLORFX,
+ 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
+ info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
+ 1, 0, V4L2_EXPOSURE_MANUAL);
+
+ sd->ctrl_handler = &info->handle;
+ if (info->handle.error) {
+ v4l2_err(sd, "Failed to initialize controls: %d\n", ret);
+ v4l2_ctrl_handler_free(&info->handle);
+ return info->handle.error;
+ }
+
+ v4l2_ctrl_cluster(2, &info->autoexposure);
+
+ return 0;
+}
+
+/**
+ * m5mols_s_power - Main sensor power control function
+ *
+ * To prevent damaging the lens when the sensor is powered off, the
+ * Soft-Landing algorithm is called where available. The availability of the
+ * Soft-Landing algorithm depends on the firmware provider.
+ */
+static int m5mols_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret;
+
+ if (on) {
+ ret = m5mols_sensor_power(info, true);
+ if (!ret)
+ ret = m5mols_sensor_armboot(sd);
+ if (!ret)
+ ret = m5mols_init_controls(info);
+ if (ret)
+ return ret;
+
+ info->ffmt[M5MOLS_RESTYPE_MONITOR] =
+ m5mols_default_ffmt[M5MOLS_RESTYPE_MONITOR];
+ info->ffmt[M5MOLS_RESTYPE_CAPTURE] =
+ m5mols_default_ffmt[M5MOLS_RESTYPE_CAPTURE];
+ return ret;
+ }
+
+ if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) {
+ ret = m5mols_mode(info, REG_MONITOR);
+ if (!ret)
+ ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP);
+ if (!ret)
+ ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF);
+ if (!ret)
+ ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS,
+ REG_AF_IDLE);
+ if (!ret)
+ v4l2_info(sd, "Success soft-landing lens\n");
+ }
+
+ ret = m5mols_sensor_power(info, false);
+ if (!ret) {
+ v4l2_ctrl_handler_free(&info->handle);
+ info->ctrl_sync = false;
+ }
+
+ return ret;
+}
+
+static int m5mols_log_status(struct v4l2_subdev *sd)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+
+ v4l2_ctrl_handler_log_status(&info->handle, sd->name);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops m5mols_core_ops = {
+ .s_power = m5mols_s_power,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .log_status = m5mols_log_status,
+};
+
+static const struct v4l2_subdev_ops m5mols_ops = {
+ .core = &m5mols_core_ops,
+ .pad = &m5mols_pad_ops,
+ .video = &m5mols_video_ops,
+};
+
+static void m5mols_irq_work(struct work_struct *work)
+{
+ struct m5mols_info *info =
+ container_of(work, struct m5mols_info, work_irq);
+ struct v4l2_subdev *sd = &info->sd;
+ u32 reg;
+ int ret;
+
+ if (!is_powered(info) ||
+ m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+ return;
+
+ switch (info->interrupt & REG_INT_MASK) {
+ case REG_INT_AF:
+ if (!is_available_af(info))
+ break;
+ ret = m5mols_read(sd, AF_STATUS, &reg);
+ v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
+ reg == REG_AF_FAIL ? "Failed" :
+ reg == REG_AF_SUCCESS ? "Success" :
+ reg == REG_AF_IDLE ? "Idle" : "Busy");
+ break;
+ case REG_INT_CAPTURE:
+ if (!test_and_set_bit(ST_CAPT_IRQ, &info->flags))
+ wake_up_interruptible(&info->irq_waitq);
+
+ v4l2_dbg(2, m5mols_debug, sd, "CAPTURE\n");
+ break;
+ default:
+ v4l2_dbg(2, m5mols_debug, sd, "Undefined: %02x\n", reg);
+ break;
+ };
+}
+
+static irqreturn_t m5mols_irq_handler(int irq, void *data)
+{
+ struct v4l2_subdev *sd = data;
+ struct m5mols_info *info = to_m5mols(sd);
+
+ schedule_work(&info->work_irq);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit m5mols_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct m5mols_platform_data *pdata = client->dev.platform_data;
+ struct m5mols_info *info;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ if (!gpio_is_valid(pdata->gpio_reset)) {
+ dev_err(&client->dev, "No valid RESET GPIO specified\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->irq) {
+ dev_err(&client->dev, "Interrupt not assigned\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(struct m5mols_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdata = pdata;
+ info->set_power = pdata->set_power;
+
+ ret = gpio_request(pdata->gpio_reset, "M5MOLS_NRST");
+ if (ret) {
+ dev_err(&client->dev, "Failed to request gpio: %d\n", ret);
+ goto out_free;
+ }
+ gpio_direction_output(pdata->gpio_reset, pdata->reset_polarity);
+
+ ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+ dev_err(&client->dev, "Failed to get regulators: %d\n", ret);
+ goto out_gpio;
+ }
+
+ sd = &info->sd;
+ strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
+ v4l2_i2c_subdev_init(sd, client, &m5mols_ops);
+
+ info->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
+ if (ret < 0)
+ goto out_reg;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+ init_waitqueue_head(&info->irq_waitq);
+ INIT_WORK(&info->work_irq, m5mols_irq_work);
+ ret = request_irq(pdata->irq, m5mols_irq_handler,
+ IRQF_TRIGGER_RISING, MODULE_NAME, sd);
+ if (ret) {
+ dev_err(&client->dev, "Interrupt request failed: %d\n", ret);
+ goto out_me;
+ }
+ info->res_type = M5MOLS_RESTYPE_MONITOR;
+ return 0;
+out_me:
+ media_entity_cleanup(&sd->entity);
+out_reg:
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+out_gpio:
+ gpio_free(pdata->gpio_reset);
+out_free:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit m5mols_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct m5mols_info *info = to_m5mols(sd);
+
+ v4l2_device_unregister_subdev(sd);
+ free_irq(info->pdata->irq, sd);
+
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+ gpio_free(info->pdata->gpio_reset);
+ media_entity_cleanup(&sd->entity);
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id m5mols_id[] = {
+ { MODULE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, m5mols_id);
+
+static struct i2c_driver m5mols_i2c_driver = {
+ .driver = {
+ .name = MODULE_NAME,
+ },
+ .probe = m5mols_probe,
+ .remove = __devexit_p(m5mols_remove),
+ .id_table = m5mols_id,
+};
+
+static int __init m5mols_mod_init(void)
+{
+ return i2c_add_driver(&m5mols_i2c_driver);
+}
+
+static void __exit m5mols_mod_exit(void)
+{
+ i2c_del_driver(&m5mols_i2c_driver);
+}
+
+module_init(m5mols_mod_init);
+module_exit(m5mols_mod_exit);
+
+MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>");
+MODULE_AUTHOR("Dongsoo Kim <dongsoo45.kim@samsung.com>");
+MODULE_DESCRIPTION("Fujitsu M-5MOLS 8M Pixel camera driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
new file mode 100644
index 000000000000..b83e36fc6ac6
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -0,0 +1,399 @@
+/*
+ * Register map for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef M5MOLS_REG_H
+#define M5MOLS_REG_H
+
+#define M5MOLS_I2C_MAX_SIZE 4
+#define M5MOLS_BYTE_READ 0x01
+#define M5MOLS_BYTE_WRITE 0x02
+
+#define I2C_CATEGORY(__cat) ((__cat >> 16) & 0xff)
+#define I2C_COMMAND(__comm) ((__comm >> 8) & 0xff)
+#define I2C_SIZE(__reg_s) ((__reg_s) & 0xff)
+#define I2C_REG(__cat, __cmd, __reg_s) ((__cat << 16) | (__cmd << 8) | __reg_s)
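+
+/*
+ * For example (illustrative only), I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1) packs
+ * category 0x02, command 0x01 and a one-byte payload size into 0x00020101;
+ * I2C_CATEGORY(), I2C_COMMAND() and I2C_SIZE() extract the fields again.
+ */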
+
+/*
+ * Category section register
+ *
+ * A category is a set of related commands of the M-5MOLS.
+ */
+#define CAT_SYSTEM 0x00
+#define CAT_PARAM 0x01
+#define CAT_MONITOR 0x02
+#define CAT_AE 0x03
+#define CAT_WB 0x06
+#define CAT_EXIF 0x07
+#define CAT_FD 0x09
+#define CAT_LENS 0x0a
+#define CAT_CAPT_PARM 0x0b
+#define CAT_CAPT_CTRL 0x0c
+#define CAT_FLASH 0x0f /* related to FW, revisions, booting */
+
+/*
+ * Category 0 - SYSTEM mode
+ *
+ * The SYSTEM mode of the M-5MOLS covers the sensor-wide, all-round
+ * functionality: it deals with the version information, interrupts, mode
+ * setting and the sensor status. The M-5MOLS sensor with ISP varies by
+ * packaging and manufacturer, and even by customer and project code, and the
+ * function details may vary among them. The version information helps to
+ * determine which methods shall be used in the driver.
+ *
+ * There are many registers between the customer version address and the AWB
+ * one. For more specific contents, see the definitions in the file m5mols.h.
+ */
+#define CAT0_VER_CUSTOMER 0x00 /* customer version */
+#define CAT0_VER_AWB 0x09 /* Auto WB version */
+#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
+#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
+#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
+#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
+#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
+
+#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
+#define REG_SYSINIT 0x00 /* SYSTEM mode */
+#define REG_PARAMETER 0x01 /* PARAMETER mode */
+#define REG_MONITOR 0x02 /* MONITOR mode */
+#define REG_CAPTURE 0x03 /* CAPTURE mode */
+
+#define SYSTEM_CMD(__cmd) I2C_REG(CAT_SYSTEM, __cmd, 1)
+#define SYSTEM_VER_STRING I2C_REG(CAT_SYSTEM, CAT0_VER_STRING, 1)
+#define REG_SAMSUNG_ELECTRO "SE" /* Samsung Electro-Mechanics */
+#define REG_SAMSUNG_OPTICS "OP" /* Samsung Fiber-Optics */
+#define REG_SAMSUNG_TECHWIN "TB" /* Samsung Techwin */
+
+#define SYSTEM_INT_FACTOR I2C_REG(CAT_SYSTEM, CAT0_INT_FACTOR, 1)
+#define SYSTEM_INT_ENABLE I2C_REG(CAT_SYSTEM, CAT0_INT_ENABLE, 1)
+#define REG_INT_MODE (1 << 0)
+#define REG_INT_AF (1 << 1)
+#define REG_INT_ZOOM (1 << 2)
+#define REG_INT_CAPTURE (1 << 3)
+#define REG_INT_FRAMESYNC (1 << 4)
+#define REG_INT_FD (1 << 5)
+#define REG_INT_LENS_INIT (1 << 6)
+#define REG_INT_SOUND (1 << 7)
+#define REG_INT_MASK 0x0f
+
+/*
+ * Category 1 - PARAMETER mode
+ *
+ * This category controls the camera features of the M-5MOLS: the preview
+ * (MONITOR) resolution, the frame rate, the interface between the sensor and
+ * the Application Processor, and the image effect.
+ */
+#define CAT1_DATA_INTERFACE 0x00 /* interface between sensor and AP */
+#define CAT1_MONITOR_SIZE 0x01 /* resolution at the MONITOR mode */
+#define CAT1_MONITOR_FPS 0x02 /* frame per second at this mode */
+#define CAT1_EFFECT 0x0b /* image effects */
+
+#define PARM_MON_SIZE I2C_REG(CAT_PARAM, CAT1_MONITOR_SIZE, 1)
+
+#define PARM_MON_FPS I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1)
+#define REG_FPS_30 0x02
+
+#define PARM_INTERFACE I2C_REG(CAT_PARAM, CAT1_DATA_INTERFACE, 1)
+#define REG_INTERFACE_MIPI 0x02
+
+#define PARM_EFFECT I2C_REG(CAT_PARAM, CAT1_EFFECT, 1)
+#define REG_EFFECT_OFF 0x00
+#define REG_EFFECT_NEGA 0x01
+#define REG_EFFECT_EMBOSS 0x06
+#define REG_EFFECT_OUTLINE 0x07
+#define REG_EFFECT_WATERCOLOR 0x08
+
+/*
+ * Category 2 - MONITOR mode
+ *
+ * The MONITOR mode is what is usually called preview mode. The M-5MOLS also
+ * has a mode named "Preview", but that one is used only in a specific
+ * video-recording case. MONITOR mode supports only the YUYV format; the JPEG
+ * and RAW formats are supported by CAPTURE mode. This category also provides
+ * options such as zoom, a color effect (different from the effect in
+ * PARAMETER mode) and an anti hand-shaking algorithm.
+ */
+#define CAT2_ZOOM 0x01 /* set the zoom position & execute */
+#define CAT2_ZOOM_STEP 0x03 /* set the zoom step */
+#define CAT2_CFIXB 0x09 /* CB value for color effect */
+#define CAT2_CFIXR 0x0a /* CR value for color effect */
+#define CAT2_COLOR_EFFECT 0x0b /* set on/off of color effect */
+#define CAT2_CHROMA_LVL 0x0f /* set chroma level */
+#define CAT2_CHROMA_EN 0x10 /* set on/off of chroma */
+#define CAT2_EDGE_LVL 0x11 /* set sharpness level */
+#define CAT2_EDGE_EN 0x12 /* set on/off sharpness */
+#define CAT2_TONE_CTL 0x25 /* set tone color(contrast) */
+
+#define MON_ZOOM I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1)
+
+#define MON_CFIXR I2C_REG(CAT_MONITOR, CAT2_CFIXR, 1)
+#define MON_CFIXB I2C_REG(CAT_MONITOR, CAT2_CFIXB, 1)
+#define REG_CFIXB_SEPIA 0xd8
+#define REG_CFIXR_SEPIA 0x18
+
+#define MON_EFFECT I2C_REG(CAT_MONITOR, CAT2_COLOR_EFFECT, 1)
+#define REG_COLOR_EFFECT_OFF 0x00
+#define REG_COLOR_EFFECT_ON 0x01
+
+#define MON_CHROMA_EN I2C_REG(CAT_MONITOR, CAT2_CHROMA_EN, 1)
+#define MON_CHROMA_LVL I2C_REG(CAT_MONITOR, CAT2_CHROMA_LVL, 1)
+#define REG_CHROMA_OFF 0x00
+#define REG_CHROMA_ON 0x01
+
+#define MON_EDGE_EN I2C_REG(CAT_MONITOR, CAT2_EDGE_EN, 1)
+#define MON_EDGE_LVL I2C_REG(CAT_MONITOR, CAT2_EDGE_LVL, 1)
+#define REG_EDGE_OFF 0x00
+#define REG_EDGE_ON 0x01
+
+#define MON_TONE_CTL I2C_REG(CAT_MONITOR, CAT2_TONE_CTL, 1)
+
+/*
+ * Category 3 - Auto Exposure
+ *
+ * The M-5MOLS provides exposure control comparable to a digital camera. This
+ * category supports AE locking, various AE modes (exposure ranges), ISO,
+ * flicker control, EV bias, shutter and metering. The minimum and maximum
+ * exposure gain values depend on the M-5MOLS firmware and may differ, so this
+ * category also allows reading the max/min values. The MONITOR and CAPTURE
+ * modes each have their own gain, shutter and maximum exposure values.
+ */
+#define CAT3_AE_LOCK 0x00 /* locking Auto exposure */
+#define CAT3_AE_MODE 0x01 /* set AE mode, mode means range */
+#define CAT3_ISO 0x05 /* set ISO */
+#define CAT3_EV_PRESET_MONITOR 0x0a /* EV(scenemode) preset for MONITOR */
+#define CAT3_EV_PRESET_CAPTURE 0x0b /* EV(scenemode) preset for CAPTURE */
+#define CAT3_MANUAL_GAIN_MON 0x12 /* metering value for the MONITOR */
+#define CAT3_MAX_GAIN_MON 0x1a /* max gain value for the MONITOR */
+#define CAT3_MANUAL_GAIN_CAP 0x26 /* metering value for the CAPTURE */
+#define CAT3_AE_INDEX 0x38 /* AE index */
+
+#define AE_LOCK I2C_REG(CAT_AE, CAT3_AE_LOCK, 1)
+#define REG_AE_UNLOCK 0x00
+#define REG_AE_LOCK 0x01
+
+#define AE_MODE I2C_REG(CAT_AE, CAT3_AE_MODE, 1)
+#define REG_AE_OFF 0x00 /* AE off */
+#define REG_AE_ALL 0x01 /* calc AE in all block integral */
+#define REG_AE_CENTER 0x03 /* calc AE in center weighted */
+#define REG_AE_SPOT 0x06 /* calc AE in specific spot */
+
+#define AE_ISO I2C_REG(CAT_AE, CAT3_ISO, 1)
+#define REG_ISO_AUTO 0x00
+#define REG_ISO_50 0x01
+#define REG_ISO_100 0x02
+#define REG_ISO_200 0x03
+#define REG_ISO_400 0x04
+#define REG_ISO_800 0x05
+
+#define AE_EV_PRESET_MONITOR I2C_REG(CAT_AE, CAT3_EV_PRESET_MONITOR, 1)
+#define AE_EV_PRESET_CAPTURE I2C_REG(CAT_AE, CAT3_EV_PRESET_CAPTURE, 1)
+#define REG_SCENE_NORMAL 0x00
+#define REG_SCENE_PORTRAIT 0x01
+#define REG_SCENE_LANDSCAPE 0x02
+#define REG_SCENE_SPORTS 0x03
+#define REG_SCENE_PARTY_INDOOR 0x04
+#define REG_SCENE_BEACH_SNOW 0x05
+#define REG_SCENE_SUNSET 0x06
+#define REG_SCENE_DAWN_DUSK 0x07
+#define REG_SCENE_FALL 0x08
+#define REG_SCENE_NIGHT 0x09
+#define REG_SCENE_AGAINST_LIGHT 0x0a
+#define REG_SCENE_FIRE 0x0b
+#define REG_SCENE_TEXT 0x0c
+#define REG_SCENE_CANDLE 0x0d
+
+#define AE_MAN_GAIN_MON I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2)
+#define AE_MAX_GAIN_MON I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2)
+#define AE_MAN_GAIN_CAP I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_CAP, 2)
+
+#define AE_INDEX I2C_REG(CAT_AE, CAT3_AE_INDEX, 1)
+#define REG_AE_INDEX_20_NEG 0x00
+#define REG_AE_INDEX_15_NEG 0x01
+#define REG_AE_INDEX_10_NEG 0x02
+#define REG_AE_INDEX_05_NEG 0x03
+#define REG_AE_INDEX_00 0x04
+#define REG_AE_INDEX_05_POS 0x05
+#define REG_AE_INDEX_10_POS 0x06
+#define REG_AE_INDEX_15_POS 0x07
+#define REG_AE_INDEX_20_POS 0x08
+
+/*
+ * Category 6 - White Balance
+ *
+ * This category provides AWB locking, mode, preset, speed, gain bias, etc.
+ */
+#define CAT6_AWB_LOCK 0x00 /* locking Auto Whitebalance */
+#define CAT6_AWB_MODE 0x02 /* set Auto or Manual */
+#define CAT6_AWB_MANUAL 0x03 /* set Manual(preset) value */
+
+#define AWB_LOCK I2C_REG(CAT_WB, CAT6_AWB_LOCK, 1)
+#define REG_AWB_UNLOCK 0x00
+#define REG_AWB_LOCK 0x01
+
+#define AWB_MODE I2C_REG(CAT_WB, CAT6_AWB_MODE, 1)
+#define REG_AWB_AUTO 0x01 /* AWB auto */
+#define REG_AWB_PRESET 0x02 /* AWB preset */
+
+#define AWB_MANUAL I2C_REG(CAT_WB, CAT6_AWB_MANUAL, 1)
+#define REG_AWB_INCANDESCENT 0x01
+#define REG_AWB_FLUORESCENT_1 0x02
+#define REG_AWB_FLUORESCENT_2 0x03
+#define REG_AWB_DAYLIGHT 0x04
+#define REG_AWB_CLOUDY 0x05
+#define REG_AWB_SHADE 0x06
+#define REG_AWB_HORIZON 0x07
+#define REG_AWB_LEDLIGHT 0x09
+
+/*
+ * Category 7 - EXIF information
+ */
+#define CAT7_INFO_EXPTIME_NU 0x00
+#define CAT7_INFO_EXPTIME_DE 0x04
+#define CAT7_INFO_TV_NU 0x08
+#define CAT7_INFO_TV_DE 0x0c
+#define CAT7_INFO_AV_NU 0x10
+#define CAT7_INFO_AV_DE 0x14
+#define CAT7_INFO_BV_NU 0x18
+#define CAT7_INFO_BV_DE 0x1c
+#define CAT7_INFO_EBV_NU 0x20
+#define CAT7_INFO_EBV_DE 0x24
+#define CAT7_INFO_ISO 0x28
+#define CAT7_INFO_FLASH 0x2a
+#define CAT7_INFO_SDR 0x2c
+#define CAT7_INFO_QVAL 0x2e
+
+#define EXIF_INFO_EXPTIME_NU I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_NU, 4)
+#define EXIF_INFO_EXPTIME_DE I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_DE, 4)
+#define EXIF_INFO_TV_NU I2C_REG(CAT_EXIF, CAT7_INFO_TV_NU, 4)
+#define EXIF_INFO_TV_DE I2C_REG(CAT_EXIF, CAT7_INFO_TV_DE, 4)
+#define EXIF_INFO_AV_NU I2C_REG(CAT_EXIF, CAT7_INFO_AV_NU, 4)
+#define EXIF_INFO_AV_DE I2C_REG(CAT_EXIF, CAT7_INFO_AV_DE, 4)
+#define EXIF_INFO_BV_NU I2C_REG(CAT_EXIF, CAT7_INFO_BV_NU, 4)
+#define EXIF_INFO_BV_DE I2C_REG(CAT_EXIF, CAT7_INFO_BV_DE, 4)
+#define EXIF_INFO_EBV_NU I2C_REG(CAT_EXIF, CAT7_INFO_EBV_NU, 4)
+#define EXIF_INFO_EBV_DE I2C_REG(CAT_EXIF, CAT7_INFO_EBV_DE, 4)
+#define EXIF_INFO_ISO I2C_REG(CAT_EXIF, CAT7_INFO_ISO, 2)
+#define EXIF_INFO_FLASH I2C_REG(CAT_EXIF, CAT7_INFO_FLASH, 2)
+#define EXIF_INFO_SDR I2C_REG(CAT_EXIF, CAT7_INFO_SDR, 2)
+#define EXIF_INFO_QVAL I2C_REG(CAT_EXIF, CAT7_INFO_QVAL, 2)
+
+/*
+ * Category 9 - Face Detection
+ */
+#define CAT9_FD_CTL 0x00
+
+#define FD_CTL I2C_REG(CAT_FD, CAT9_FD_CTL, 1)
+#define BIT_FD_EN 0
+#define BIT_FD_DRAW_FACE_FRAME 4
+#define BIT_FD_DRAW_SMILE_LVL 6
+#define REG_FD(shift) (1 << shift)
+#define REG_FD_OFF 0x0
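+
+/*
+ * For instance (illustration only): enabling face detection together with the
+ * face-frame overlay would write REG_FD(BIT_FD_EN) |
+ * REG_FD(BIT_FD_DRAW_FACE_FRAME) (= 0x11) to FD_CTL, while REG_FD_OFF
+ * disables it.
+ */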
+
+/*
+ * Category A - Lens Parameter
+ */
+#define CATA_AF_MODE 0x01
+#define CATA_AF_EXECUTE 0x02
+#define CATA_AF_STATUS 0x03
+#define CATA_AF_VERSION 0x0a
+
+#define AF_MODE I2C_REG(CAT_LENS, CATA_AF_MODE, 1)
+#define REG_AF_NORMAL 0x00 /* Normal AF, one time */
+#define REG_AF_MACRO 0x01 /* Macro AF, one time */
+#define REG_AF_POWEROFF 0x07
+
+#define AF_EXECUTE I2C_REG(CAT_LENS, CATA_AF_EXECUTE, 1)
+#define REG_AF_STOP 0x00
+#define REG_AF_EXE_AUTO 0x01
+#define REG_AF_EXE_CAF 0x02
+
+#define AF_STATUS I2C_REG(CAT_LENS, CATA_AF_STATUS, 1)
+#define REG_AF_FAIL 0x00
+#define REG_AF_SUCCESS 0x02
+#define REG_AF_IDLE 0x04
+#define REG_AF_BUSY 0x05
+
+#define AF_VERSION I2C_REG(CAT_LENS, CATA_AF_VERSION, 1)
+
+/*
+ * Category B - CAPTURE Parameter
+ */
+#define CATB_YUVOUT_MAIN 0x00
+#define CATB_MAIN_IMAGE_SIZE 0x01
+#define CATB_MCC_MODE 0x1d
+#define CATB_WDR_EN 0x2c
+#define CATB_LIGHT_CTRL 0x40
+#define CATB_FLASH_CTRL 0x41
+
+#define CAPP_YUVOUT_MAIN I2C_REG(CAT_CAPT_PARM, CATB_YUVOUT_MAIN, 1)
+#define REG_YUV422 0x00
+#define REG_BAYER10 0x05
+#define REG_BAYER8 0x06
+#define REG_JPEG 0x10
+
+#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, CATB_MAIN_IMAGE_SIZE, 1)
+
+#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, CATB_MCC_MODE, 1)
+#define REG_MCC_OFF 0x00
+#define REG_MCC_NORMAL 0x01
+
+#define CAPP_WDR_EN I2C_REG(CAT_CAPT_PARM, CATB_WDR_EN, 1)
+#define REG_WDR_OFF 0x00
+#define REG_WDR_ON 0x01
+#define REG_WDR_AUTO 0x02
+
+#define CAPP_LIGHT_CTRL I2C_REG(CAT_CAPT_PARM, CATB_LIGHT_CTRL, 1)
+#define REG_LIGHT_OFF 0x00
+#define REG_LIGHT_ON 0x01
+#define REG_LIGHT_AUTO 0x02
+
+#define CAPP_FLASH_CTRL I2C_REG(CAT_CAPT_PARM, CATB_FLASH_CTRL, 1)
+#define REG_FLASH_OFF 0x00
+#define REG_FLASH_ON 0x01
+#define REG_FLASH_AUTO 0x02
+
+/*
+ * Category C - CAPTURE Control
+ */
+#define CATC_CAP_MODE 0x00
+#define CATC_CAP_SEL_FRAME 0x06 /* determines single- or multi-frame capture */
+#define CATC_CAP_START 0x09
+#define CATC_CAP_IMAGE_SIZE 0x0d
+#define CATC_CAP_THUMB_SIZE 0x11
+
+#define CAPC_MODE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_MODE, 1)
+#define REG_CAP_NONE 0x00
+#define REG_CAP_ANTI_SHAKE 0x02
+
+#define CAPC_SEL_FRAME I2C_REG(CAT_CAPT_CTRL, CATC_CAP_SEL_FRAME, 1)
+
+#define CAPC_START I2C_REG(CAT_CAPT_CTRL, CATC_CAP_START, 1)
+#define REG_CAP_START_MAIN 0x01
+#define REG_CAP_START_THUMB 0x03
+
+#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
+#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+
+/*
+ * Category F - Flash
+ *
+ * This category covers the internal flash memory and system startup control.
+ */
+#define CATF_CAM_START 0x12 /* It starts internal ARM core booting
+ * after power-up */
+
+#define FLASH_CAM_START I2C_REG(CAT_FLASH, CATF_CAM_START, 1)
+#define REG_START_ARM_BOOT 0x01
+
+#endif /* M5MOLS_REG_H */
diff --git a/drivers/media/video/uvc/Makefile b/drivers/media/video/uvc/Makefile
index 968c1994eda0..2071ca8a2f03 100644
--- a/drivers/media/video/uvc/Makefile
+++ b/drivers/media/video/uvc/Makefile
@@ -1,3 +1,6 @@
uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \
uvc_status.o uvc_isight.o
+ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
+uvcvideo-objs += uvc_entity.o
+endif
obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 823f4b389745..b6eae48d7fb8 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -248,7 +248,7 @@ uint32_t uvc_fraction_to_interval(uint32_t numerator, uint32_t denominator)
* Terminal and unit management
*/
-static struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
+struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
{
struct uvc_entity *entity;
@@ -795,9 +795,12 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
struct uvc_entity *entity;
unsigned int num_inputs;
unsigned int size;
+ unsigned int i;
+ extra_size = ALIGN(extra_size, sizeof(*entity->pads));
num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
- size = sizeof(*entity) + extra_size + num_inputs;
+ size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
+ + num_inputs;
entity = kzalloc(size, GFP_KERNEL);
if (entity == NULL)
return NULL;
@@ -805,8 +808,17 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
entity->id = id;
entity->type = type;
+ entity->num_links = 0;
+ entity->num_pads = num_pads;
+ entity->pads = ((void *)(entity + 1)) + extra_size;
+
+ for (i = 0; i < num_inputs; ++i)
+ entity->pads[i].flags = MEDIA_PAD_FL_SINK;
+ if (!UVC_ENTITY_IS_OTERM(entity))
+ entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
+
entity->bNrInPins = num_inputs;
- entity->baSourceID = ((__u8 *)entity) + sizeof(*entity) + extra_size;
+ entity->baSourceID = (__u8 *)(&entity->pads[num_pads]);
return entity;
}
@@ -1585,6 +1597,13 @@ static void uvc_delete(struct uvc_device *dev)
uvc_status_cleanup(dev);
uvc_ctrl_cleanup_device(dev);
+ if (dev->vdev.dev)
+ v4l2_device_unregister(&dev->vdev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ if (media_devnode_is_registered(&dev->mdev.devnode))
+ media_device_unregister(&dev->mdev);
+#endif
+
list_for_each_safe(p, n, &dev->chains) {
struct uvc_video_chain *chain;
chain = list_entry(p, struct uvc_video_chain, list);
@@ -1594,6 +1613,13 @@ static void uvc_delete(struct uvc_device *dev)
list_for_each_safe(p, n, &dev->entities) {
struct uvc_entity *entity;
entity = list_entry(p, struct uvc_entity, list);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ uvc_mc_cleanup_entity(entity);
+#endif
+ if (entity->vdev) {
+ video_device_release(entity->vdev);
+ entity->vdev = NULL;
+ }
kfree(entity);
}
@@ -1616,8 +1642,6 @@ static void uvc_release(struct video_device *vdev)
struct uvc_streaming *stream = video_get_drvdata(vdev);
struct uvc_device *dev = stream->dev;
- video_device_release(vdev);
-
/* Decrement the registered streams count and delete the device when it
* reaches zero.
*/
@@ -1682,7 +1706,7 @@ static int uvc_register_video(struct uvc_device *dev,
* unregistered before the reference is released, so we don't need to
* get another one.
*/
- vdev->parent = &dev->intf->dev;
+ vdev->v4l2_dev = &dev->vdev;
vdev->fops = &uvc_fops;
vdev->release = uvc_release;
strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1731,6 +1755,8 @@ static int uvc_register_terms(struct uvc_device *dev,
ret = uvc_register_video(dev, stream);
if (ret < 0)
return ret;
+
+ term->vdev = stream->vdev;
}
return 0;
@@ -1745,6 +1771,14 @@ static int uvc_register_chains(struct uvc_device *dev)
ret = uvc_register_terms(dev, chain);
if (ret < 0)
return ret;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = uvc_mc_register_entities(chain);
+ if (ret < 0) {
+ uvc_printk(KERN_INFO, "Failed to register entities "
+ "(%d).\n", ret);
+ }
+#endif
}
return 0;
@@ -1814,6 +1848,24 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
+ /* Register the media and V4L2 devices. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &intf->dev;
+ strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+ if (udev->serial)
+ strlcpy(dev->mdev.serial, udev->serial,
+ sizeof(dev->mdev.serial));
+ strcpy(dev->mdev.bus_info, udev->devpath);
+ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ dev->mdev.driver_version = DRIVER_VERSION_NUMBER;
+ if (media_device_register(&dev->mdev) < 0)
+ goto error;
+
+ dev->vdev.mdev = &dev->mdev;
+#endif
+ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
+ goto error;
+
/* Initialize controls. */
if (uvc_ctrl_init_device(dev) < 0)
goto error;
@@ -1822,7 +1874,7 @@ static int uvc_probe(struct usb_interface *intf,
if (uvc_scan_device(dev) < 0)
goto error;
- /* Register video devices. */
+ /* Register video device nodes. */
if (uvc_register_chains(dev) < 0)
goto error;
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
new file mode 100644
index 000000000000..ede7852bb1df
--- /dev/null
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -0,0 +1,118 @@
+/*
+ * uvc_entity.c -- USB Video Class driver
+ *
+ * Copyright (C) 2005-2011
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+
+#include "uvcvideo.h"
+
+/* ------------------------------------------------------------------------
+ * Video subdevices registration and unregistration
+ */
+
+static int uvc_mc_register_entity(struct uvc_video_chain *chain,
+ struct uvc_entity *entity)
+{
+ const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+ struct uvc_entity *remote;
+ unsigned int i;
+ u8 remote_pad;
+ int ret = 0;
+
+ for (i = 0; i < entity->num_pads; ++i) {
+ struct media_entity *source;
+ struct media_entity *sink;
+
+ if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
+ if (remote == NULL)
+ return -EINVAL;
+
+ source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
+ ? &remote->vdev->entity : &remote->subdev.entity;
+ sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ ? &entity->vdev->entity : &entity->subdev.entity;
+
+ remote_pad = remote->num_pads - 1;
+ ret = media_entity_create_link(source, remote_pad,
+ sink, i, flags);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
+ ret = v4l2_device_register_subdev(&chain->dev->vdev,
+ &entity->subdev);
+
+ return ret;
+}
+
+static struct v4l2_subdev_ops uvc_subdev_ops = {
+};
+
+void uvc_mc_cleanup_entity(struct uvc_entity *entity)
+{
+ if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
+ media_entity_cleanup(&entity->subdev.entity);
+ else if (entity->vdev != NULL)
+ media_entity_cleanup(&entity->vdev->entity);
+}
+
+static int uvc_mc_init_entity(struct uvc_entity *entity)
+{
+ int ret;
+
+ if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
+ v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
+ strlcpy(entity->subdev.name, entity->name,
+ sizeof(entity->subdev.name));
+
+ ret = media_entity_init(&entity->subdev.entity,
+ entity->num_pads, entity->pads, 0);
+ } else
+ ret = media_entity_init(&entity->vdev->entity,
+ entity->num_pads, entity->pads, 0);
+
+ return ret;
+}
+
+int uvc_mc_register_entities(struct uvc_video_chain *chain)
+{
+ struct uvc_entity *entity;
+ int ret;
+
+ list_for_each_entry(entity, &chain->entities, chain) {
+ ret = uvc_mc_init_entity(entity);
+ if (ret < 0) {
+ uvc_printk(KERN_INFO, "Failed to initialize entity for "
+ "entity %u\n", entity->id);
+ return ret;
+ }
+ }
+
+ list_for_each_entry(entity, &chain->entities, chain) {
+ ret = uvc_mc_register_entity(chain, entity);
+ if (ret < 0) {
+ uvc_printk(KERN_INFO, "Failed to register entity for "
+ "entity %u\n", entity->id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 7cf224bae2e5..20107fd3574d 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -98,8 +98,11 @@ struct uvc_xu_control {
#ifdef __KERNEL__
#include <linux/poll.h>
+#include <linux/usb.h>
#include <linux/usb/video.h>
#include <linux/uvcvideo.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
/* --------------------------------------------------------------------------
* UVC constants
@@ -301,6 +304,13 @@ struct uvc_entity {
__u16 type;
char name[64];
+ /* Media controller-related fields. */
+ struct video_device *vdev;
+ struct v4l2_subdev subdev;
+ unsigned int num_pads;
+ unsigned int num_links;
+ struct media_pad *pads;
+
union {
struct {
__u16 wObjectiveFocalLengthMin;
@@ -504,6 +514,10 @@ struct uvc_device {
atomic_t nmappings;
/* Video control interface */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
+ struct v4l2_device vdev;
__u16 uvc_version;
__u32 clock_frequency;
@@ -583,6 +597,8 @@ extern unsigned int uvc_timeout_param;
/* Core driver */
extern struct uvc_driver uvc_driver;
+extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
+
/* Video buffers queue management. */
extern void uvc_queue_init(struct uvc_video_queue *queue,
enum v4l2_buf_type type, int drop_corrupted);
@@ -616,6 +632,10 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
/* V4L2 interface */
extern const struct v4l2_file_operations uvc_fops;
+/* Media controller */
+extern int uvc_mc_register_entities(struct uvc_video_chain *chain);
+extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
+
/* Video */
extern int uvc_video_init(struct uvc_streaming *stream);
extern int uvc_video_suspend(struct uvc_streaming *stream);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8344fc0ab858..0f09c057e796 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -719,6 +719,15 @@ config MFD_PM8XXX_IRQ
This is required to use certain other PM 8xxx features, such as GPIO
and MPP.
+config MFD_TPS65910
+ bool "TPS65910 Power Management chip"
+ depends on I2C=y && GPIOLIB
+ select MFD_CORE
+ select GPIO_TPS65910
+ help
+ If you say yes here you get support for the TPS65910 series of
+ Power Management chips.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 1acb8f29a96c..efe3cc33ed92 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -93,3 +93,4 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e63782107e2f..02a15d7cb3b0 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2005,7 +2005,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
- .mfd_data = &db8500_regulators,
+ .platform_data = &db8500_regulators,
+ .pdata_size = sizeof(db8500_regulators),
},
{
.name = "cpufreq-u8500",
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
new file mode 100644
index 000000000000..2bfad5c86cc7
--- /dev/null
+++ b/drivers/mfd/tps65910-irq.c
@@ -0,0 +1,218 @@
+/*
+ * tps65910-irq.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
+ int irq)
+{
+ return (irq - tps65910->irq_base);
+}
+
+/*
+ * This is a threaded IRQ handler so can access I2C/SPI. Since all
+ * interrupts are clear on read the IRQ line will be reasserted and
+ * the physical IRQ will be handled again if another interrupt is
+ * asserted while we run - in the normal course of events this is a
+ * rare occurrence so we save I2C/SPI reads. We're also assuming that
+ * it's rare to get lots of interrupts firing simultaneously so try to
+ * minimise I/O.
+ */
+static irqreturn_t tps65910_irq(int irq, void *irq_data)
+{
+ struct tps65910 *tps65910 = irq_data;
+ u32 irq_sts;
+ u32 irq_mask;
+ u8 reg;
+ int i;
+
+ tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+ irq_sts = reg;
+ tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+ irq_sts |= reg << 8;
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+ irq_sts |= reg << 16;
+ }
+
+ tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ irq_mask = reg;
+ tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ irq_mask |= reg << 8;
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ irq_mask |= reg << 16;
+ }
+
+ irq_sts &= ~irq_mask;
+
+ if (!irq_sts)
+ return IRQ_NONE;
+
+ for (i = 0; i < tps65910->irq_num; i++) {
+
+ if (!(irq_sts & (1 << i)))
+ continue;
+
+ handle_nested_irq(tps65910->irq_base + i);
+ }
+
+ /* Write the STS register back to clear IRQs we handled */
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+ reg = irq_sts & 0xFF;
+ tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ reg = irq_sts >> 8;
+ tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tps65910_irq_lock(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps65910->irq_lock);
+}
+
+static void tps65910_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+ u32 reg_mask;
+ u8 reg;
+
+ tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ reg_mask = reg;
+ tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ reg_mask |= reg << 8;
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ reg_mask |= reg << 16;
+ }
+
+ if (tps65910->irq_mask != reg_mask) {
+ reg = tps65910->irq_mask & 0xFF;
+ tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+ reg = tps65910->irq_mask >> 8 & 0xFF;
+ tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ reg = tps65910->irq_mask >> 16;
+ tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ }
+ }
+ mutex_unlock(&tps65910->irq_lock);
+}
+
+static void tps65910_irq_enable(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+ tps65910->irq_mask &= ~(1 << irq_to_tps65910_irq(tps65910, data->irq));
+}
+
+static void tps65910_irq_disable(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+ tps65910->irq_mask |= (1 << irq_to_tps65910_irq(tps65910, data->irq));
+}
+
+static struct irq_chip tps65910_irq_chip = {
+ .name = "tps65910",
+ .irq_bus_lock = tps65910_irq_lock,
+ .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
+ .irq_disable = tps65910_irq_disable,
+ .irq_enable = tps65910_irq_enable,
+};
+
+int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+ struct tps65910_platform_data *pdata)
+{
+ int ret, cur_irq;
+ int flags = IRQF_ONESHOT;
+
+ if (!irq) {
+ dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+ return -EINVAL;
+ }
+
+ if (!pdata || !pdata->irq_base) {
+ dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+ return -EINVAL;
+ }
+
+ tps65910->irq_mask = 0xFFFFFF;
+
+ mutex_init(&tps65910->irq_lock);
+ tps65910->chip_irq = irq;
+ tps65910->irq_base = pdata->irq_base;
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ tps65910->irq_num = TPS65910_NUM_IRQ;
+ break;
+ case TPS65911:
+ tps65910->irq_num = TPS65911_NUM_IRQ;
+ break;
+ }
+
+ /* Register with genirq */
+ for (cur_irq = tps65910->irq_base;
+ cur_irq < tps65910->irq_num + tps65910->irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, tps65910);
+ irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
+ "tps65910", tps65910);
+
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+
+ if (ret != 0)
+ dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
+
+ return ret;
+}
+
+int tps65910_irq_exit(struct tps65910 *tps65910)
+{
+ free_irq(tps65910->chip_irq, tps65910);
+ return 0;
+}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
new file mode 100644
index 000000000000..2229e66d80db
--- /dev/null
+++ b/drivers/mfd/tps65910.c
@@ -0,0 +1,229 @@
+/*
+ * tps65910.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65910.h>
+
+static struct mfd_cell tps65910s[] = {
+ {
+ .name = "tps65910-pmic",
+ },
+ {
+ .name = "tps65910-rtc",
+ },
+ {
+ .name = "tps65910-power",
+ },
+};
+
+
+static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps65910->i2c_client;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps65910->i2c_client;
+ /* we add 1 byte for device register */
+ u8 msg[TPS65910_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > TPS65910_MAX_REGISTER)
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+ return 0;
+}
+
+int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65910->io_mutex);
+ err = tps65910_i2c_read(tps65910, reg, 1, &data);
+ if (err) {
+ dev_err(tps65910->dev, "read from reg %x failed\n", reg);
+ goto out;
+ }
+
+ data |= mask;
+ err = tps65910_i2c_write(tps65910, reg, 1, &data);
+ if (err)
+ dev_err(tps65910->dev, "write to reg %x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65910->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65910_set_bits);
+
+int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65910->io_mutex);
+ err = tps65910_i2c_read(tps65910, reg, 1, &data);
+ if (err) {
+ dev_err(tps65910->dev, "read from reg %x failed\n", reg);
+ goto out;
+ }
+
+ data &= ~mask;
+ err = tps65910_i2c_write(tps65910, reg, 1, &data);
+ if (err)
+ dev_err(tps65910->dev, "write to reg %x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65910->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65910_clear_bits);
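+
+/*
+ * Usage sketch (illustration only; TPS65910_SOME_REG is a placeholder, not a
+ * real register name): a sub-driver holding a struct tps65910 pointer can set
+ * a bit with tps65910_set_bits(tps65910, TPS65910_SOME_REG, BIT(3)) and clear
+ * it again with tps65910_clear_bits(tps65910, TPS65910_SOME_REG, BIT(3)).
+ */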
+
+static int tps65910_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps65910 *tps65910;
+ struct tps65910_board *pmic_plat_data;
+ struct tps65910_platform_data *init_data;
+ int ret = 0;
+
+ pmic_plat_data = dev_get_platdata(&i2c->dev);
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+ init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
+ tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
+ if (tps65910 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tps65910);
+ tps65910->dev = &i2c->dev;
+ tps65910->i2c_client = i2c;
+ tps65910->id = id->driver_data;
+ tps65910->read = tps65910_i2c_read;
+ tps65910->write = tps65910_i2c_write;
+ mutex_init(&tps65910->io_mutex);
+
+ ret = mfd_add_devices(tps65910->dev, -1,
+ tps65910s, ARRAY_SIZE(tps65910s),
+ NULL, 0);
+ if (ret < 0)
+ goto err;
+
+ tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
+
+ ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(tps65910->dev);
+ kfree(tps65910);
+ return ret;
+}
+
+static int tps65910_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(tps65910->dev);
+ kfree(tps65910);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65910_i2c_id[] = {
+ { "tps65910", TPS65910 },
+ { "tps65911", TPS65911 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id);
+
+
+static struct i2c_driver tps65910_i2c_driver = {
+ .driver = {
+ .name = "tps65910",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65910_i2c_probe,
+ .remove = tps65910_i2c_remove,
+ .id_table = tps65910_i2c_id,
+};
+
+static int __init tps65910_i2c_init(void)
+{
+ return i2c_add_driver(&tps65910_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65910_i2c_init);
+
+static void __exit tps65910_i2c_exit(void)
+{
+ i2c_del_driver(&tps65910_i2c_driver);
+}
+module_exit(tps65910_i2c_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
new file mode 100644
index 000000000000..3d2dc56a3d40
--- /dev/null
+++ b/drivers/mfd/tps65911-comparator.c
@@ -0,0 +1,188 @@
+/*
+ * tps65911-comparator.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+#define COMP 0
+#define COMP1 1
+#define COMP2 2
+
+/* Comparator voltage selection table in millivolts */
+static const u16 COMP_VSEL_TABLE[] = {
+ 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
+ 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
+ 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450,
+ 3500,
+};
+
+struct comparator {
+ const char *name;
+ int reg;
+ int uV_max;
+ const u16 *vsel_table;
+};
+
+static struct comparator tps_comparators[] = {
+ {
+ .name = "COMP1",
+ .reg = TPS65911_VMBCH,
+ .uV_max = 3500,
+ .vsel_table = COMP_VSEL_TABLE,
+ },
+ {
+ .name = "COMP2",
+ .reg = TPS65911_VMBCH2,
+ .uV_max = 3500,
+ .vsel_table = COMP_VSEL_TABLE,
+ },
+};
+
+static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage)
+{
+ struct comparator tps_comp = tps_comparators[id];
+ int curr_voltage = 0;
+ int ret;
+ u8 index = 0, val;
+
+ if (id == COMP)
+ return 0;
+
+ while (curr_voltage < tps_comp.uV_max) {
+ curr_voltage = tps_comp.vsel_table[index];
+ if (curr_voltage >= voltage)
+ break;
+ else if (curr_voltage < voltage)
+ index++;
+ }
+
+ if (curr_voltage > tps_comp.uV_max)
+ return -EINVAL;
+
+ val = index << 1;
+ ret = tps65910->write(tps65910, tps_comp.reg, 1, &val);
+
+ return ret;
+}
+
+static int comp_threshold_get(struct tps65910 *tps65910, int id)
+{
+ struct comparator tps_comp = tps_comparators[id];
+ int ret;
+ u8 val;
+
+ if (id == COMP)
+ return 0;
+
+ ret = tps65910->read(tps65910, tps_comp.reg, 1, &val);
+ if (ret < 0)
+ return ret;
+
+ val >>= 1;
+ return tps_comp.vsel_table[val];
+}
+
+static ssize_t comp_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(dev->parent);
+ struct attribute comp_attr = attr->attr;
+ int id, uVolt;
+
+ if (!strcmp(comp_attr.name, "comp1_threshold"))
+ id = COMP1;
+ else if (!strcmp(comp_attr.name, "comp2_threshold"))
+ id = COMP2;
+ else
+ return -EINVAL;
+
+ uVolt = comp_threshold_get(tps65910, id);
+
+ return sprintf(buf, "%d\n", uVolt);
+}
+
+static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL);
+static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
+
+static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+ int ret;
+
+ ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot set COMP1 threshold\n");
+ return ret;
+ }
+
+ ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot set COMP2 threshold\n");
+ return ret;
+ }
+
+ /* Create sysfs entry */
+ ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold);
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n");
+
+ ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold);
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n");
+
+ return ret;
+}
+
+static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910;
+
+ tps65910 = dev_get_drvdata(pdev->dev.parent);
+
+ return 0;
+}
+
+static struct platform_driver tps65911_comparator_driver = {
+ .driver = {
+ .name = "tps65911-comparator",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65911_comparator_probe,
+ .remove = __devexit_p(tps65911_comparator_remove),
+};
+
+static int __init tps65911_comparator_init(void)
+{
+ return platform_driver_register(&tps65911_comparator_driver);
+}
+subsys_initcall(tps65911_comparator_init);
+
+static void __exit tps65911_comparator_exit(void)
+{
+ platform_driver_unregister(&tps65911_comparator_driver);
+}
+module_exit(tps65911_comparator_exit);
+
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65911 comparator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65911-comparator");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 4941e06fe2e1..5da5bea0f9f0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -51,6 +51,7 @@ static unsigned int fmax = 515633;
* is asserted (likewise for RX)
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @blksz_datactrl16: true if the block size is specified in bits 16..30 of the datactrl register
*/
struct variant_data {
unsigned int clkreg;
@@ -60,6 +61,7 @@ struct variant_data {
unsigned int fifohalfsize;
bool sdio;
bool st_clkdiv;
+ bool blksz_datactrl16;
};
static struct variant_data variant_arm = {
@@ -92,6 +94,17 @@ static struct variant_data variant_ux500 = {
.st_clkdiv = true,
};
+static struct variant_data variant_ux500v2 = {
+ .fifosize = 30 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .datalength_bits = 24,
+ .sdio = true,
+ .st_clkdiv = true,
+ .blksz_datactrl16 = true,
+};
+
/*
* This must be called with host->lock held
*/
@@ -465,7 +478,10 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+ if (variant->blksz_datactrl16)
+ datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+ else
+ datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
@@ -1311,9 +1327,14 @@ static struct amba_id mmci_ids[] = {
},
{
.id = 0x00480180,
- .mask = 0x00ffffff,
+ .mask = 0xf0ffffff,
.data = &variant_ux500,
},
+ {
+ .id = 0x10480180,
+ .mask = 0xf0ffffff,
+ .data = &variant_ux500v2,
+ },
{ 0, 0 },
};
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index bc50d5ea5534..4be8373d43e5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -33,20 +33,6 @@ config MTD_TESTS
should normally be compiled as kernel modules. The modules perform
various checks and verifications when loaded.
-config MTD_PARTITIONS
- bool "MTD partitioning support"
- help
- If you have a device which needs to divide its flash chip(s) up
- into multiple 'partitions', each of which appears to the user as
- a separate MTD device, you require this option to be enabled. If
- unsure, say 'Y'.
-
- Note, however, that you don't need this option for the DiskOnChip
- devices. Partitioning on NFTL 'devices' is a different - that's the
- 'normal' form of partitioning used on a block device.
-
-if MTD_PARTITIONS
-
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
---help---
@@ -99,7 +85,7 @@ endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
bool "Command line partition table parsing"
- depends on MTD_PARTITIONS = "y" && MTD = "y"
+ depends on MTD = "y"
---help---
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
@@ -163,8 +149,6 @@ config MTD_AR7_PARTS
---help---
TI AR7 partitioning support
-endif # MTD_PARTITIONS
-
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index d578095fb255..39664c4229ff 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,8 +4,7 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdconcat.o
-mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 09cb7c8d93b4..e1e122f2f929 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -812,12 +812,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
break;
if (time_after(jiffies, timeo)) {
- /* Urgh. Resume and pretend we weren't here. */
- map_write(map, CMD(0xd0), adr);
- /* Make sure we're in 'read status' mode if it had finished */
- map_write(map, CMD(0x70), adr);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ /* Urgh. Resume and pretend we weren't here.
+ * Make sure we're in 'read status' mode if it had finished */
+ put_chip(map, chip, adr);
printk(KERN_ERR "%s: Chip not ready after erase "
"suspended: status = 0x%lx\n", map->name, status.x[0]);
return -EIO;
@@ -997,7 +994,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
/* What if one interleaved chip has finished and the
other hasn't? The old code would leave the finished
one in READY mode. That's bad, and caused -EROFS
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 0b49266840b9..23175edd5634 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -462,13 +462,14 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi_fixup_major_minor(cfi, extp);
/*
- * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
+ * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
* see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
* http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
* http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
+ * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
*/
if (extp->MajorVersion != '1' ||
- (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
+ (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
"version %c.%c (%#02x/%#02x).\n",
extp->MajorVersion, extp->MinorVersion,
@@ -710,9 +711,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* there was an error (so leave the erase
* routine to recover from it) or we trying to
* use the erase-in-progress sector. */
- map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ put_chip(map, chip, adr);
printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
return -EIO;
}
@@ -762,7 +761,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index ed56ad3884fb..179814a95f3a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -296,6 +296,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
+ wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97183c8c9e33..b78f23169d4e 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -294,7 +294,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
- if (add_mtd_device(&dev->mtd)) {
+ if (mtd_device_register(&dev->mtd, NULL, 0)) {
/* Device didn't get added, so free the entry */
goto devinit_err;
}
@@ -465,7 +465,7 @@ static void __devexit block2mtd_exit(void)
list_for_each_safe(pos, next, &blkmtd_device_list) {
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
block2mtd_sync(&dev->mtd);
- del_mtd_device(&dev->mtd);
+ mtd_device_unregister(&dev->mtd);
INFO("mtd%d: [%s] removed", dev->mtd.index,
dev->mtd.name + strlen("block2mtd: "));
list_del(&dev->list);
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 5bf5f460e132..f7fbf6025ef2 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -597,7 +597,7 @@ void DoC2k_init(struct mtd_info *mtd)
doc2klist = mtd;
mtd->size = this->totlen;
mtd->erasesize = this->erasesize;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -1185,7 +1185,7 @@ static void __exit cleanup_doc2000(void)
this = mtd->priv;
doc2klist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 0990f7803628..241192f05bc8 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -376,7 +376,7 @@ void DoCMil_init(struct mtd_info *mtd)
this->nextdoc = docmillist;
docmillist = mtd;
mtd->size = this->totlen;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -826,7 +826,7 @@ static void __exit cleanup_doc2001(void)
this = mtd->priv;
docmillist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 8b36fa77a195..09ae0adc3ad0 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -499,7 +499,7 @@ void DoCMilPlus_init(struct mtd_info *mtd)
docmilpluslist = mtd;
mtd->size = this->totlen;
mtd->erasesize = this->erasesize;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -1091,7 +1091,7 @@ static void __exit cleanup_doc2001plus(void)
this = mtd->priv;
docmilpluslist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4b829f97d56c..772a0ff89e0f 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -684,9 +684,10 @@ static int __init lart_flash_init (void)
#endif
#ifndef HAVE_PARTITIONS
- result = add_mtd_device (&mtd);
+ result = mtd_device_register(&mtd, NULL, 0);
#else
- result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions));
+ result = mtd_device_register(&mtd, lart_partitions,
+ ARRAY_SIZE(lart_partitions));
#endif
return (result);
@@ -695,9 +696,9 @@ static int __init lart_flash_init (void)
static void __exit lart_flash_exit (void)
{
#ifndef HAVE_PARTITIONS
- del_mtd_device (&mtd);
+ mtd_device_unregister(&mtd);
#else
- del_mtd_partitions (&mtd);
+ mtd_device_unregister(&mtd);
#endif
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 3fb981d4bb51..35180e475c4c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/mod_devicetable.h>
+#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -55,6 +56,9 @@
#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
+/* Used for Spansion flashes only. */
+#define OPCODE_BRWR 0x17 /* Bank register write */
+
/* Status Register bits. */
#define SR_WIP 1 /* Write in progress */
#define SR_WEL 2 /* Write enable latch */
@@ -76,6 +80,8 @@
#define FAST_READ_DUMMY_BYTE 0
#endif
+#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
/****************************************************************************/
struct m25p {
@@ -158,11 +164,18 @@ static inline int write_disable(struct m25p *flash)
/*
* Enable/disable 4-byte addressing mode.
*/
-static inline int set_4byte(struct m25p *flash, int enable)
+static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
- u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B;
-
- return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
+ return spi_write(flash->spi, flash->command, 1);
+ default:
+ /* Spansion style */
+ flash->command[0] = OPCODE_BRWR;
+ flash->command[1] = enable << 7;
+ return spi_write(flash->spi, flash->command, 2);
+ }
}
/*
@@ -668,6 +681,7 @@ static const struct spi_device_id m25p_ids[] = {
/* Macronix */
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
@@ -684,6 +698,10 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
@@ -729,7 +747,10 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
@@ -804,6 +825,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
struct m25p *flash;
struct flash_info *info;
unsigned i;
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -868,9 +891,9 @@ static int __devinit m25p_probe(struct spi_device *spi)
* up with the software protection bits set
*/
- if (info->jedec_id >> 16 == 0x1f ||
- info->jedec_id >> 16 == 0x89 ||
- info->jedec_id >> 16 == 0xbf) {
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
write_enable(flash);
write_sr(flash, 0);
}
@@ -888,7 +911,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.read = m25p80_read;
/* sst flash chips use AAI word program */
- if (info->jedec_id >> 16 == 0xbf)
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
flash->mtd.write = sst_write;
else
flash->mtd.write = m25p80_write;
@@ -914,7 +937,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* enable 4-byte addressing if the device exceeds 16MiB */
if (flash->mtd.size > 0x1000000) {
flash->addr_width = 4;
- set_4byte(flash, 1);
+ set_4byte(flash, info->jedec_id, 1);
} else
flash->addr_width = 3;
}
@@ -945,48 +968,41 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* partitions should match sector boundaries; and it may be good to
* use readonly partitions for writeprotected sectors (BP2..BP0).
*/
- if (mtd_has_partitions()) {
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
-
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[]
+ = { "cmdlinepart", NULL, };
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes, &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(&flash->mtd,
+ part_probes, &parts, 0);
+ }
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
#ifdef CONFIG_MTD_OF_PARTS
- if (nr_parts <= 0 && spi->dev.of_node) {
- nr_parts = of_mtd_parse_partitions(&spi->dev,
- spi->dev.of_node, &parts);
- }
+ if (nr_parts <= 0 && spi->dev.of_node) {
+ nr_parts = of_mtd_parse_partitions(&spi->dev,
+ spi->dev.of_node, &parts);
+ }
#endif
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
- flash->partitioned = 1;
- return add_mtd_partitions(&flash->mtd, parts, nr_parts);
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name,
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
}
- } else if (data && data->nr_parts)
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- data->nr_parts, data->name);
+ flash->partitioned = 1;
+ }
- return add_mtd_device(&flash->mtd) == 1 ? -ENODEV : 0;
+ return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
+ -ENODEV : 0;
}
@@ -996,10 +1012,7 @@ static int __devexit m25p_remove(struct spi_device *spi)
int status;
/* Clean up MTD stuff. */
- if (mtd_has_partitions() && flash->partitioned)
- status = del_mtd_partitions(&flash->mtd);
- else
- status = del_mtd_device(&flash->mtd);
+ status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
kfree(flash->command);
kfree(flash);
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 6a9a24a80a6d..8423fb6d4f26 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -220,7 +220,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->writesize = 1;
ret = -EIO;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_ERR
"ms02-nv: Unable to register MTD device, aborting!\n");
goto err_out_csr_res;
@@ -262,7 +262,7 @@ static void __exit ms02nv_remove_one(void)
root_ms02nv_mtd = mp->next;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
release_resource(mp->resource.csr);
kfree(mp->resource.csr);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index c5015cc721d5..13749d458a31 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -637,6 +637,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
struct flash_platform_data *pdata = spi->dev.platform_data;
char *otp_tag = "";
int err = 0;
+ struct mtd_partition *parts;
+ int nr_parts = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
@@ -675,33 +677,25 @@ add_dataflash_otp(struct spi_device *spi, char *name,
pagesize, otp_tag);
dev_set_drvdata(&spi->dev, priv);
- if (mtd_has_partitions()) {
- struct mtd_partition *parts;
- int nr_parts = 0;
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] = { "cmdlinepart", NULL, };
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
-
- nr_parts = parse_mtd_partitions(device,
- part_probes, &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(device, part_probes, &parts,
+ 0);
+ }
- if (nr_parts <= 0 && pdata && pdata->parts) {
- parts = pdata->parts;
- nr_parts = pdata->nr_parts;
- }
+ if (nr_parts <= 0 && pdata && pdata->parts) {
+ parts = pdata->parts;
+ nr_parts = pdata->nr_parts;
+ }
- if (nr_parts > 0) {
- priv->partitioned = 1;
- err = add_mtd_partitions(device, parts, nr_parts);
- goto out;
- }
- } else if (pdata && pdata->nr_parts)
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- pdata->nr_parts, device->name);
+ if (nr_parts > 0) {
+ priv->partitioned = 1;
+ err = mtd_device_register(device, parts, nr_parts);
+ goto out;
+ }
- if (add_mtd_device(device) == 1)
+ if (mtd_device_register(device, NULL, 0) == 1)
err = -ENODEV;
out:
@@ -939,10 +933,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
- if (mtd_has_partitions() && flash->partitioned)
- status = del_mtd_partitions(&flash->mtd);
- else
- status = del_mtd_device(&flash->mtd);
+ status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
dev_set_drvdata(&spi->dev, NULL);
kfree(flash);
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 1483e18971ce..2562689ba6b4 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -104,7 +104,7 @@ static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
static void __exit cleanup_mtdram(void)
{
if (mtd_info) {
- del_mtd_device(mtd_info);
+ mtd_device_unregister(mtd_info);
vfree(mtd_info->priv);
kfree(mtd_info);
}
@@ -133,9 +133,8 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->read = ram_read;
mtd->write = ram_write;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0))
return -EIO;
- }
return 0;
}
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8d28fa02a5a2..23423bd00b06 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -115,7 +115,7 @@ static void unregister_devices(void)
struct phram_mtd_list *this, *safe;
list_for_each_entry_safe(this, safe, &phram_list, list) {
- del_mtd_device(&this->mtd);
+ mtd_device_unregister(&this->mtd);
iounmap(this->mtd.priv);
kfree(this->mtd.name);
kfree(this);
@@ -153,7 +153,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
new->mtd.writesize = 1;
ret = -EAGAIN;
- if (add_mtd_device(&new->mtd)) {
+ if (mtd_device_register(&new->mtd, NULL, 0)) {
pr_err("Failed to register new device\n");
goto out2;
}
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 41b8cdcc64cb..ecff765579dd 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -798,7 +798,7 @@ static int __init init_pmc551(void)
mtd->writesize = 1;
mtd->owner = THIS_MODULE;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
pci_iounmap(PCI_Device, priv->start);
kfree(mtd->priv);
@@ -806,7 +806,7 @@ static int __init init_pmc551(void)
break;
}
- /* Keep a reference as the add_mtd_device worked */
+ /* Keep a reference as the mtd_device_register worked */
pci_dev_get(PCI_Device);
printk(KERN_NOTICE "Registered pmc551 memory device.\n");
@@ -856,7 +856,7 @@ static void __exit cleanup_pmc551(void)
pci_dev_put(priv->dev);
kfree(mtd->priv);
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
kfree(mtd);
found++;
}
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 592016a0668f..e585263161b9 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -210,7 +210,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
(*curmtd)->mtdinfo->writesize = 1;
- if (add_mtd_device((*curmtd)->mtdinfo)) {
+ if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
@@ -231,7 +231,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
- del_mtd_device(slram_mtdlist->mtdinfo);
+ mtd_device_unregister(slram_mtdlist->mtdinfo);
iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index c163e619abc9..1e2c430aaad2 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -66,7 +66,7 @@ struct flash_info {
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
-static struct flash_info __initdata sst25l_flash_info[] = {
+static struct flash_info __devinitdata sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
@@ -381,6 +381,8 @@ static int __devinit sst25l_probe(struct spi_device *spi)
struct sst25l_flash *flash;
struct flash_platform_data *data;
int ret, i;
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
flash_info = sst25l_match_device(spi);
if (!flash_info)
@@ -420,46 +422,37 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
- if (mtd_has_partitions()) {
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[] =
- {"cmdlinepart", NULL};
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] = {"cmdlinepart", NULL};
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes,
- &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(&flash->mtd,
+ part_probes,
+ &parts, 0);
+ }
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
-
- flash->partitioned = 1;
- return add_mtd_partitions(&flash->mtd,
- parts, nr_parts);
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name,
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
}
- } else if (data && data->nr_parts) {
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- data->nr_parts, data->name);
+ flash->partitioned = 1;
+ return mtd_device_register(&flash->mtd, parts,
+ nr_parts);
}
- ret = add_mtd_device(&flash->mtd);
+ ret = mtd_device_register(&flash->mtd, NULL, 0);
if (ret == 1) {
kfree(flash);
dev_set_drvdata(&spi->dev, NULL);
@@ -469,15 +462,12 @@ static int __devinit sst25l_probe(struct spi_device *spi)
return 0;
}
-static int __exit sst25l_remove(struct spi_device *spi)
+static int __devexit sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
int ret;
- if (mtd_has_partitions() && flash->partitioned)
- ret = del_mtd_partitions(&flash->mtd);
- else
- ret = del_mtd_device(&flash->mtd);
+ ret = mtd_device_unregister(&flash->mtd);
if (ret == 0)
kfree(flash);
return ret;
@@ -490,7 +480,7 @@ static struct spi_driver sst25l_driver = {
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
- .remove = __exit_p(sst25l_remove),
+ .remove = __devexit_p(sst25l_remove),
};
static int __init sst25l_init(void)
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 12679925b420..65655dd59e1f 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -313,12 +313,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
if (ret) {
/* Oops. something got wrong. */
/* Resume and pretend we weren't here. */
- map_write(map, CMD(LPDDR_RESUME),
- map->pfow_base + PFOW_COMMAND_CODE);
- map_write(map, CMD(LPDDR_START_EXECUTION),
- map->pfow_base + PFOW_COMMAND_EXECUTE);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ put_chip(map, chip);
printk(KERN_ERR "%s: suspend operation failed."
"State may be wrong \n", map->name);
return -EIO;
@@ -383,7 +378,6 @@ static void put_chip(struct map_info *map, struct flchip *chip)
switch (chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
map_write(map, CMD(LPDDR_RESUME),
map->pfow_base + PFOW_COMMAND_CODE);
map_write(map, CMD(LPDDR_START_EXECUTION),
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5069111c81cc..c0c328c5b133 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -82,7 +82,6 @@ config MTD_PHYSMAP_OF
config MTD_PMC_MSP_EVM
tristate "CFI Flash device mapped on PMC-Sierra MSP"
depends on PMC_MSP && MTD_CFI
- select MTD_PARTITIONS
help
This provides a 'mapping' driver which supports the way
in which user-programmable flash chips are connected on the
@@ -122,7 +121,7 @@ config MTD_SC520CDP
config MTD_NETSC520
tristate "CFI Flash device mapped on AMD NetSc520"
- depends on X86 && MTD_CFI && MTD_PARTITIONS
+ depends on X86 && MTD_CFI
help
This enables access routines for the flash chips on the AMD NetSc520
demonstration board. If you have one of these boards and would like
@@ -131,7 +130,6 @@ config MTD_NETSC520
config MTD_TS5500
tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
depends on X86
- select MTD_PARTITIONS
select MTD_JEDECPROBE
select MTD_CFI_AMDSTD
help
@@ -149,7 +147,7 @@ config MTD_TS5500
config MTD_SBC_GXX
tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
- depends on X86 && MTD_CFI_INTELEXT && MTD_PARTITIONS && MTD_COMPLEX_MAPPINGS
+ depends on X86 && MTD_CFI_INTELEXT && MTD_COMPLEX_MAPPINGS
help
This provides a driver for the on-board flash of Arcom Control
Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
@@ -161,7 +159,6 @@ config MTD_SBC_GXX
config MTD_PXA2XX
tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
- select MTD_PARTITIONS
help
This provides a driver for the NOR flash attached to a PXA2xx chip.
@@ -185,7 +182,7 @@ config MTD_VMAX
config MTD_SCx200_DOCFLASH
tristate "Flash device mapped with DOCCS on NatSemi SCx200"
- depends on SCx200 && MTD_CFI && MTD_PARTITIONS
+ depends on SCx200 && MTD_CFI
help
Enable support for a flash chip mapped using the DOCCS signal on a
National Semiconductor SCx200 processor.
@@ -247,7 +244,7 @@ config MTD_TSUNAMI
config MTD_NETtel
tristate "CFI flash device on SnapGear/SecureEdge"
- depends on X86 && MTD_PARTITIONS && MTD_JEDECPROBE
+ depends on X86 && MTD_JEDECPROBE
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
@@ -269,7 +266,7 @@ config MTD_LANTIQ
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
+ depends on X86 && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -355,7 +352,7 @@ config MTD_CDB89712
config MTD_SA1100
tristate "CFI Flash device mapped on StrongARM SA11x0"
- depends on MTD_CFI && ARCH_SA1100 && MTD_PARTITIONS
+ depends on MTD_CFI && ARCH_SA1100
help
This enables access to the flash chips on most platforms based on
the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
@@ -389,7 +386,7 @@ config MTD_IXP2000
config MTD_FORTUNET
tristate "CFI Flash device mapped on the FortuNet board"
- depends on MTD_CFI && MTD_PARTITIONS && SA1100_FORTUNET
+ depends on MTD_CFI && SA1100_FORTUNET
help
This enables access to the Flash on the FortuNet board. If you
have such a board, say 'Y'.
@@ -461,7 +458,6 @@ config MTD_PCMCIA_ANONYMOUS
config MTD_BFIN_ASYNC
tristate "Blackfin BF533-STAMP Flash Chip Support"
depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS
- select MTD_PARTITIONS
default y
help
Map driver which allows for simultaneous utilization of
@@ -473,7 +469,6 @@ config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support"
depends on GENERIC_GPIO || GPIOLIB
depends on MTD_COMPLEX_MAPPINGS
- select MTD_PARTITIONS
help
Map driver which allows flashes to be partially physically addressed
and assisted by GPIOs.
@@ -482,14 +477,13 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_PARTITIONS && MTD_RAM=y && !MMU
+ depends on MTD_RAM=y && !MMU
help
Map driver to support image based filesystems for uClinux.
config MTD_WRSBC8260
tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
depends on (SBC82xx || SBC8560)
- select MTD_PARTITIONS
select MTD_MAP_BANK_WIDTH_4
select MTD_MAP_BANK_WIDTH_1
select MTD_CFI_I1
@@ -502,7 +496,6 @@ config MTD_WRSBC8260
config MTD_DMV182
tristate "Map driver for Dy-4 SVME/DMV-182 board."
depends on DMV182
- select MTD_PARTITIONS
select MTD_MAP_BANK_WIDTH_32
select MTD_CFI_I8
select MTD_CFI_AMDSTD
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 92de7e3a49a5..e2875d6fe129 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -82,7 +82,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
if (map->rsrc.parent) {
release_resource(&map->rsrc);
}
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -262,7 +262,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 53664188fc47..e5bfd0e093bb 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -88,7 +88,7 @@ map:
sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
- if (add_mtd_device(sram_mtd)) {
+ if (mtd_device_register(sram_mtd, NULL, 0)) {
printk("NV-RAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -111,7 +111,7 @@ out:
static void __exit cleanup_autcpu12_maps(void)
{
if (sram_mtd) {
- del_mtd_device(sram_mtd);
+ mtd_device_unregister(sram_mtd);
map_destroy(sram_mtd);
iounmap((void *)autcpu12_sram_map.virt);
}
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 1f3049590d9e..608967fe74c6 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -224,8 +224,8 @@ probe_ok:
goto err_probe;
}
- return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
+ return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
+ parsed_nr_parts);
err_probe:
iounmap(bcm963xx_map.virt);
@@ -235,7 +235,7 @@ err_probe:
static int bcm963xx_remove(struct platform_device *pdev)
{
if (bcm963xx_mtd_info) {
- del_mtd_partitions(bcm963xx_mtd_info);
+ mtd_device_unregister(bcm963xx_mtd_info);
map_destroy(bcm963xx_mtd_info);
}
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 85dd18193cf2..d4297a97e100 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,9 +41,7 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
static void switch_to_flash(struct async_state *state)
@@ -124,9 +122,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
switch_back(state);
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
static int __devinit bfin_flash_probe(struct platform_device *pdev)
{
@@ -169,22 +165,17 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, ret);
+ mtd_device_register(state->mtd, pdata->parts, ret);
state->parts = pdata->parts;
-
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
-
- } else
-#endif
- {
+ mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
+ } else {
pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
- add_mtd_device(state->mtd);
+ mtd_device_register(state->mtd, NULL, 0);
}
platform_set_drvdata(pdev, state);
@@ -196,10 +187,8 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(state->mtd);
+ mtd_device_unregister(state->mtd);
kfree(state->parts);
-#endif
map_destroy(state->mtd);
kfree(state);
return 0;
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index 8d92d8db9a98..c29cbf87ea0c 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -75,7 +75,7 @@ static int __init init_cdb89712_flash (void)
flash_mtd->owner = THIS_MODULE;
- if (add_mtd_device(flash_mtd)) {
+ if (mtd_device_register(flash_mtd, NULL, 0)) {
printk("FLASH device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -141,7 +141,7 @@ static int __init init_cdb89712_sram (void)
sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
- if (add_mtd_device(sram_mtd)) {
+ if (mtd_device_register(sram_mtd, NULL, 0)) {
printk("SRAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -209,7 +209,7 @@ static int __init init_cdb89712_bootrom (void)
bootrom_mtd->owner = THIS_MODULE;
bootrom_mtd->erasesize = 0x10000;
- if (add_mtd_device(bootrom_mtd)) {
+ if (mtd_device_register(bootrom_mtd, NULL, 0)) {
printk("BootROM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -249,21 +249,21 @@ static int __init init_cdb89712_maps(void)
static void __exit cleanup_cdb89712_maps(void)
{
if (sram_mtd) {
- del_mtd_device(sram_mtd);
+ mtd_device_unregister(sram_mtd);
map_destroy(sram_mtd);
iounmap((void *)cdb89712_sram_map.virt);
release_resource (&cdb89712_sram_resource);
}
if (flash_mtd) {
- del_mtd_device(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
iounmap((void *)cdb89712_flash_map.virt);
release_resource (&cdb89712_flash_resource);
}
if (bootrom_mtd) {
- del_mtd_device(bootrom_mtd);
+ mtd_device_unregister(bootrom_mtd);
map_destroy(bootrom_mtd);
iounmap((void *)cdb89712_bootrom_map.virt);
release_resource (&cdb89712_bootrom_resource);
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 23f551dc8ca8..06f9c9815720 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -224,7 +224,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
{
int i;
- del_mtd_partitions(mtd);
+ mtd_device_unregister(mtd);
if (mtd != clps[0].mtd)
mtd_concat_destroy(mtd);
@@ -292,11 +292,11 @@ static void __init clps_locate_partitions(struct mtd_info *mtd)
if (nr_parts == 0) {
printk(KERN_NOTICE "clps flash: no partition info "
"available, registering whole flash\n");
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
} else {
printk(KERN_NOTICE "clps flash: using %s partition "
"definition\n", part_type);
- add_mtd_partitions(mtd, parsed_parts, nr_parts);
+ mtd_device_register(mtd, parsed_parts, nr_parts);
}
/* Always succeeds. */
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index f71343cd77cc..d16fc9d3b8cd 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -107,7 +107,7 @@ static int __init init_flagadm(void)
mymtd = do_map_probe("cfi_probe", &flagadm_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT);
+ mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
printk(KERN_NOTICE "FlagaDM flash device initialized\n");
return 0;
}
@@ -119,7 +119,7 @@ static int __init init_flagadm(void)
static void __exit cleanup_flagadm(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (flagadm_map.virt) {
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 5fdb7b26cea3..3d0e762fa5f2 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -94,7 +94,7 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -291,7 +291,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index cfacfa6f45dd..85bdece6ab3f 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -93,7 +93,7 @@ static int __init init_dbox2_flash(void)
mymtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
@@ -105,7 +105,7 @@ static int __init init_dbox2_flash(void)
static void __exit cleanup_dbox2_flash(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (dbox2_flash_map.virt) {
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index b3cb3a183809..7a9e1989c977 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,17 +145,13 @@ static struct map_info dc21285_map = {
/* Partition stuff */
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition *dc21285_parts;
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
static int __init init_dc21285(void)
{
-#ifdef CONFIG_MTD_PARTITIONS
int nrparts;
-#endif
/* Determine bankwidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
@@ -204,13 +200,8 @@ static int __init init_dc21285(void)
dc21285_mtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
- if (nrparts > 0)
- add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
- else
-#endif
- add_mtd_device(dc21285_mtd);
+ mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
if(machine_is_ebsa285()) {
/*
@@ -232,14 +223,9 @@ static int __init init_dc21285(void)
static void __exit cleanup_dc21285(void)
{
-#ifdef CONFIG_MTD_PARTITIONS
- if (dc21285_parts) {
- del_mtd_partitions(dc21285_mtd);
+ mtd_device_unregister(dc21285_mtd);
+ if (dc21285_parts)
kfree(dc21285_parts);
- } else
-#endif
- del_mtd_device(dc21285_mtd);
-
map_destroy(dc21285_mtd);
iounmap(dc21285_map.virt);
}
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 0713e3a5a22c..3e393f0da823 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -450,7 +450,7 @@ static int __init init_dnpc(void)
partition_info[2].mtdp = &lowlvl_parts[1];
partition_info[3].mtdp = &lowlvl_parts[3];
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
/*
** now create a virtual MTD device by concatenating the for partitions
@@ -463,7 +463,8 @@ static int __init init_dnpc(void)
** we do not supply mtd pointers in higlvl_partition_info, so
** add_mtd_partitions() will register the devices.
*/
- add_mtd_partitions(merged_mtd, higlvl_partition_info, NUM_HIGHLVL_PARTITIONS);
+ mtd_device_register(merged_mtd, higlvl_partition_info,
+ NUM_HIGHLVL_PARTITIONS);
}
return 0;
@@ -472,12 +473,12 @@ static int __init init_dnpc(void)
static void __exit cleanup_dnpc(void)
{
if(merged_mtd) {
- del_mtd_partitions(merged_mtd);
+ mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (dnpc_map.virt) {
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index d171674eb2ed..6538ac675e00 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -120,7 +120,7 @@ static int __init init_svme182(void)
this_mtd->size >> 20, FLASH_BASE_ADDR);
this_mtd->owner = THIS_MODULE;
- add_mtd_partitions(this_mtd, partitions, num_parts);
+ mtd_device_register(this_mtd, partitions, num_parts);
return 0;
}
@@ -129,7 +129,7 @@ static void __exit cleanup_svme182(void)
{
if (this_mtd)
{
- del_mtd_partitions(this_mtd);
+ mtd_device_unregister(this_mtd);
map_destroy(this_mtd);
}
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index be9e90b44587..fe42a212bb3e 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -15,10 +15,7 @@
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
#define WINDOW_SIZE 0x01000000
@@ -40,8 +37,6 @@ struct map_info edb7312nor_map = {
.phys = WINDOW_ADDR,
};
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* MTD partitioning stuff
*/
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[3] =
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
-
static int mtd_parts_nb = 0;
static struct mtd_partition *mtd_parts = 0;
@@ -96,27 +89,24 @@ static int __init init_edb7312nor(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
if (mtd_parts_nb > 0)
- part_type = "detected";
+ part_type = "detected";
- if (mtd_parts_nb == 0)
- {
+ if (mtd_parts_nb == 0) {
mtd_parts = static_partitions;
mtd_parts_nb = ARRAY_SIZE(static_partitions);
part_type = "static";
}
-#endif
- add_mtd_device(mymtd);
+
if (mtd_parts_nb == 0)
- printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
+ printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
else
- {
printk(KERN_NOTICE MSG_PREFIX
"using %s partition definition\n", part_type);
- add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb);
- }
+ /* Register the whole device first. */
+ mtd_device_register(mymtd, NULL, 0);
+ mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
return 0;
}
@@ -127,7 +117,7 @@ static int __init init_edb7312nor(void)
static void __exit cleanup_edb7312nor(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (edb7312nor_map.virt) {
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 4feb7507ab7c..08322b1c3e81 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -128,7 +128,7 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -352,7 +352,7 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 1e43124d498b..956e2e4f30ea 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -243,8 +243,9 @@ static int __init init_fortunet(void)
&map_regions[ix].map_info);
}
map_regions[ix].mymtd->owner = THIS_MODULE;
- add_mtd_partitions(map_regions[ix].mymtd,
- map_regions[ix].parts,map_regions_parts[ix]);
+ mtd_device_register(map_regions[ix].mymtd,
+ map_regions[ix].parts,
+ map_regions_parts[ix]);
}
}
if(iy)
@@ -261,7 +262,7 @@ static void __exit cleanup_fortunet(void)
{
if( map_regions[ix].mymtd )
{
- del_mtd_partitions( map_regions[ix].mymtd );
+ mtd_device_unregister(map_regions[ix].mymtd);
map_destroy( map_regions[ix].mymtd );
}
iounmap((void *)map_regions[ix].map_info.virt);
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index af5707a80205..7568c5f8b8ae 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -155,9 +155,7 @@ static void gf_copy_to(struct map_info *map, unsigned long to, const void *from,
memcpy_toio(map->virt + (to % state->win_size), from, len);
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
/**
* gpio_flash_probe() - setup a mapping for a GPIO assisted flash
@@ -189,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
*/
static int __devinit gpio_flash_probe(struct platform_device *pdev)
{
- int ret;
+ int nr_parts;
size_t i, arr_size;
struct physmap_flash_data *pdata;
struct resource *memory;
@@ -254,24 +252,21 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
- ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
- if (ret > 0) {
+ nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
+ &pdata->parts, 0);
+ if (nr_parts > 0) {
pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, ret);
kfree(pdata->parts);
-
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
-
- } else
-#endif
- {
+ nr_parts = pdata->nr_parts;
+ } else {
pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
- add_mtd_device(state->mtd);
+ nr_parts = 0;
}
+ mtd_device_register(state->mtd, pdata->parts, nr_parts);
+
return 0;
}
@@ -282,9 +277,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
do {
gpio_free(state->gpio_addrs[i]);
} while (++i < state->gpio_count);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(state->mtd);
-#endif
+ mtd_device_unregister(state->mtd);
map_destroy(state->mtd);
kfree(state);
return 0;
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 72c724fa8c27..7f035860a36b 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -92,18 +92,16 @@ static int __init h720x_mtd_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
if (nr_mtd_parts > 0)
part_type = "command line";
-#endif
if (nr_mtd_parts <= 0) {
mtd_parts = h720x_partitions;
nr_mtd_parts = NUM_PARTITIONS;
part_type = "builtin";
}
printk(KERN_INFO "Using %s partition table\n", part_type);
- add_mtd_partitions(mymtd, mtd_parts, nr_mtd_parts);
+ mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
return 0;
}
@@ -118,7 +116,7 @@ static void __exit h720x_mtd_cleanup(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 1337a4191a0c..6689dcb3124d 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -67,7 +67,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -287,7 +287,7 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 998a27da97f3..404a50cbafa0 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -15,10 +15,7 @@
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
#define WINDOW_SIZE0 0x00800000
@@ -49,8 +46,6 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
},
};
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* MTD partitioning stuff
*/
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[] =
static int mtd_parts_nb[NUM_FLASHBANKS];
static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
-#endif
-
static const char *probes[] = { "cmdlinepart", NULL };
static int __init init_impa7(void)
@@ -104,7 +97,6 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
probes,
&mtd_parts[i],
@@ -120,12 +112,8 @@ static int __init init_impa7(void)
printk(KERN_NOTICE MSG_PREFIX
"using %s partition definition\n",
part_type);
- add_mtd_partitions(impa7_mtd[i],
- mtd_parts[i], mtd_parts_nb[i]);
-#else
- add_mtd_device(impa7_mtd[i]);
-
-#endif
+ mtd_device_register(impa7_mtd[i],
+ mtd_parts[i], mtd_parts_nb[i]);
}
else
iounmap((void *)impa7_map[i].virt);
@@ -138,11 +126,7 @@ static void __exit cleanup_impa7(void)
int i;
for (i=0; i<NUM_FLASHBANKS; i++) {
if (impa7_mtd[i]) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(impa7_mtd[i]);
-#else
- del_mtd_device(impa7_mtd[i]);
-#endif
+ mtd_device_unregister(impa7_mtd[i]);
map_destroy(impa7_mtd[i]);
iounmap((void *)impa7_map[i].virt);
impa7_map[i].virt = 0;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index fc1998512eb4..d2f47be8754b 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -66,33 +66,18 @@ struct vr_nor_mtd {
static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
- if (p->nr_parts > 0) {
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
- del_mtd_partitions(p->info);
-#endif
- } else
- del_mtd_device(p->info);
+ mtd_device_unregister(p->info);
}
static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
- int err = 0;
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
struct mtd_partition *parts;
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/* register the flash bank */
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
/* partition the flash bank */
p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
- if (p->nr_parts > 0)
- err = add_mtd_partitions(p->info, parts, p->nr_parts);
-#endif
- if (p->nr_parts <= 0)
- err = add_mtd_device(p->info);
-
- return err;
+ return mtd_device_register(p->info, parts, p->nr_parts);
}
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 9639d83a9d6c..c00b9175ba9e 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -119,7 +119,7 @@ static int ixp2000_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
- del_mtd_partitions(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
if (info->map.map_priv_1)
@@ -230,7 +230,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
if (err > 0) {
- err = add_mtd_partitions(info->mtd, info->partitions, err);
+ err = mtd_device_register(info->mtd, info->partitions, err);
if(err)
dev_err(&dev->dev, "Could not parse partitions\n");
}
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 1f9fde0dad35..155b21942f47 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -162,7 +162,7 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
- del_mtd_partitions(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
if (info->map.virt)
@@ -252,10 +252,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
/* Use the fast version */
info->map.write = ixp4xx_write16;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
dev->resource->start);
-#endif
if (nr_parts > 0) {
part_type = "dynamic";
} else {
@@ -263,18 +261,16 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
nr_parts = plat->nr_parts;
part_type = "static";
}
- if (nr_parts == 0) {
+ if (nr_parts == 0)
printk(KERN_NOTICE "IXP4xx flash: no partition info "
"available, registering whole flash\n");
- err = add_mtd_device(info->mtd);
- } else {
+ else
printk(KERN_NOTICE "IXP4xx flash: using %s partition "
"definition\n", part_type);
- err = add_mtd_partitions(info->mtd, info->partitions, nr_parts);
- if(err)
- printk(KERN_ERR "Could not parse partitions\n");
- }
+ err = mtd_device_register(info->mtd, info->partitions, nr_parts);
+ if (err)
+ printk(KERN_ERR "Could not parse partitions\n");
if (err)
goto Error;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 9e054503c4cf..dd0360ba2412 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -138,7 +138,7 @@ static int __init init_l440gx(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
+ mtd_device_register(mymtd, NULL, 0);
return 0;
}
@@ -148,7 +148,7 @@ static int __init init_l440gx(void)
static void __exit cleanup_l440gx(void)
{
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
iounmap(l440gx_map.virt);
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index ee2548085334..5936c466e901 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -112,18 +112,9 @@ static int latch_addr_flash_remove(struct platform_device *dev)
latch_addr_data = dev->dev.platform_data;
if (info->mtd != NULL) {
- if (mtd_has_partitions()) {
- if (info->nr_parts) {
- del_mtd_partitions(info->mtd);
- kfree(info->parts);
- } else if (latch_addr_data->nr_parts) {
- del_mtd_partitions(info->mtd);
- } else {
- del_mtd_device(info->mtd);
- }
- } else {
- del_mtd_device(info->mtd);
- }
+ if (info->nr_parts)
+ kfree(info->parts);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
@@ -215,23 +206,21 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- if (mtd_has_partitions()) {
-
- err = parse_mtd_partitions(info->mtd,
- (const char **)part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- add_mtd_partitions(info->mtd, info->parts, err);
- return 0;
- }
- if (latch_addr_data->nr_parts) {
- pr_notice("Using latch-addr-flash partition information\n");
- add_mtd_partitions(info->mtd, latch_addr_data->parts,
- latch_addr_data->nr_parts);
- return 0;
- }
+ err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
+ &info->parts, 0);
+ if (err > 0) {
+ mtd_device_register(info->mtd, info->parts, err);
+ return 0;
+ }
+ if (latch_addr_data->nr_parts) {
+ pr_notice("Using latch-addr-flash partition information\n");
+ mtd_device_register(info->mtd,
+ latch_addr_data->parts,
+ latch_addr_data->nr_parts);
+ return 0;
}
- add_mtd_device(info->mtd);
+
+ mtd_device_register(info->mtd, NULL, 0);
return 0;
iounmap:
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 0eb5a7c85380..93fa56c33003 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -69,8 +69,8 @@ static int __init init_mbx(void)
mymtd = do_map_probe("jedec_probe", &mbx_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, NULL, 0);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
@@ -81,7 +81,7 @@ static int __init init_mbx(void)
static void __exit cleanup_mbx(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (mbx_map.virt) {
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index c0cb319b2b70..81dc2598bc0a 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -116,14 +116,14 @@ static int __init init_netsc520(void)
}
mymtd->owner = THIS_MODULE;
- add_mtd_partitions( mymtd, partition_info, NUM_PARTITIONS );
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
static void __exit cleanup_netsc520(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (netsc520_map.virt) {
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a97133eb9d70..eadcfffc4f9c 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -383,13 +383,13 @@ static int __init nettel_init(void)
/* No BIOS regions when AMD boot */
num_intel_partitions -= 2;
}
- rc = add_mtd_partitions(intel_mtd, nettel_intel_partitions,
- num_intel_partitions);
+ rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
+ num_intel_partitions);
#endif
if (amd_mtd) {
- rc = add_mtd_partitions(amd_mtd, nettel_amd_partitions,
- num_amd_partitions);
+ rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
+ num_amd_partitions);
}
#ifdef CONFIG_MTD_CFI_INTELEXT
@@ -419,7 +419,7 @@ static void __exit nettel_cleanup(void)
unregister_reboot_notifier(&nettel_notifier_block);
#endif
if (amd_mtd) {
- del_mtd_partitions(amd_mtd);
+ mtd_device_unregister(amd_mtd);
map_destroy(amd_mtd);
}
if (nettel_mmcrp) {
@@ -432,7 +432,7 @@ static void __exit nettel_cleanup(void)
}
#ifdef CONFIG_MTD_CFI_INTELEXT
if (intel_mtd) {
- del_mtd_partitions(intel_mtd);
+ mtd_device_unregister(intel_mtd);
map_destroy(intel_mtd);
}
if (nettel_intel_map.virt) {
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 23fe1786770f..807ac2a2e686 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -175,7 +175,7 @@ void cleanup_oct5066(void)
int i;
for (i=0; i<2; i++) {
if (oct5066_mtd[i]) {
- del_mtd_device(oct5066_mtd[i]);
+ mtd_device_unregister(oct5066_mtd[i]);
map_destroy(oct5066_mtd[i]);
}
}
@@ -220,7 +220,7 @@ static int __init init_oct5066(void)
oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
if (oct5066_mtd[i]) {
oct5066_mtd[i]->owner = THIS_MODULE;
- add_mtd_device(oct5066_mtd[i]);
+ mtd_device_register(oct5066_mtd[i], NULL, 0);
}
}
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 48f4cf5cb9d1..1d005a3e9b41 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -313,7 +313,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto release;
mtd->owner = THIS_MODULE;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
pci_set_drvdata(dev, mtd);
@@ -336,7 +336,7 @@ mtd_pci_remove(struct pci_dev *dev)
struct mtd_info *mtd = pci_get_drvdata(dev);
struct map_pci_info *map = mtd->priv;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
map_destroy(mtd);
map->exit(dev, map);
kfree(map);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 33dc2829b01b..bbe168b65c26 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -630,7 +630,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
dev->pcmcia_map.copy_to = pcmcia_copy_to;
}
- if(add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
map_destroy(mtd);
dev->mtd_info = NULL;
dev_err(&dev->p_dev->dev,
@@ -669,7 +669,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
DEBUG(3, "link=0x%p", link);
if(dev->mtd_info) {
- del_mtd_device(dev->mtd_info);
+ mtd_device_unregister(dev->mtd_info);
dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
dev->mtd_info->index);
map_destroy(dev->mtd_info);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 1a9b94f0ee54..f64cee4a3bfb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,10 +27,8 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
static int physmap_flash_remove(struct platform_device *dev)
@@ -47,18 +45,9 @@ static int physmap_flash_remove(struct platform_device *dev)
physmap_data = dev->dev.platform_data;
if (info->cmtd) {
-#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts || physmap_data->nr_parts) {
- del_mtd_partitions(info->cmtd);
-
- if (info->nr_parts)
- kfree(info->parts);
- } else {
- del_mtd_device(info->cmtd);
- }
-#else
- del_mtd_device(info->cmtd);
-#endif
+ mtd_device_unregister(info->cmtd);
+ if (info->nr_parts)
+ kfree(info->parts);
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
}
@@ -92,10 +81,8 @@ static const char *rom_probe_types[] = {
"qinfo_probe",
"map_rom",
NULL };
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
NULL };
-#endif
static int physmap_flash_probe(struct platform_device *dev)
{
@@ -188,24 +175,23 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(info->cmtd, part_probe_types,
- &info->parts, 0);
+ &info->parts, 0);
if (err > 0) {
- add_mtd_partitions(info->cmtd, info->parts, err);
+ mtd_device_register(info->cmtd, info->parts, err);
info->nr_parts = err;
return 0;
}
if (physmap_data->nr_parts) {
printk(KERN_NOTICE "Using physmap partition information\n");
- add_mtd_partitions(info->cmtd, physmap_data->parts,
- physmap_data->nr_parts);
+ mtd_device_register(info->cmtd, physmap_data->parts,
+ physmap_data->nr_parts);
return 0;
}
-#endif
- add_mtd_device(info->cmtd);
+ mtd_device_register(info->cmtd, NULL, 0);
+
return 0;
err_out:
@@ -269,14 +255,12 @@ void physmap_configure(unsigned long addr, unsigned long size,
physmap_flash_data.set_vpp = set_vpp;
}
-#ifdef CONFIG_MTD_PARTITIONS
void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
{
physmap_flash_data.nr_parts = num_parts;
physmap_flash_data.parts = parts;
}
#endif
-#endif
static int __init physmap_init(void)
{
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c1d33464aee8..d251d1db129b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,16 +34,12 @@ struct of_flash_list {
struct of_flash {
struct mtd_info *cmtd;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
int list_size; /* number of elements in of_flash_list */
struct of_flash_list list[0];
};
-#ifdef CONFIG_MTD_PARTITIONS
#define OF_FLASH_PARTS(info) ((info)->parts)
-
static int parse_obsolete_partitions(struct platform_device *dev,
struct of_flash *info,
struct device_node *dp)
@@ -89,10 +85,6 @@ static int parse_obsolete_partitions(struct platform_device *dev,
return nr_parts;
}
-#else /* MTD_PARTITIONS */
-#define OF_FLASH_PARTS(info) (0)
-#define parse_partitions(info, dev) (0)
-#endif /* MTD_PARTITIONS */
static int of_flash_remove(struct platform_device *dev)
{
@@ -105,17 +97,14 @@ static int of_flash_remove(struct platform_device *dev)
dev_set_drvdata(&dev->dev, NULL);
if (info->cmtd != info->list[0].mtd) {
- del_mtd_device(info->cmtd);
+ mtd_device_unregister(info->cmtd);
mtd_concat_destroy(info->cmtd);
}
if (info->cmtd) {
- if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->cmtd);
+ if (OF_FLASH_PARTS(info))
kfree(OF_FLASH_PARTS(info));
- } else {
- del_mtd_device(info->cmtd);
- }
+ mtd_device_unregister(info->cmtd);
}
for (i = 0; i < info->list_size; i++) {
@@ -172,7 +161,6 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
}
}
-#ifdef CONFIG_MTD_PARTITIONS
/* When partitions are set we look for a linux,part-probe property which
specifies the list of partition probers to use. If none is given then the
default is use. These take precedence over other device tree
@@ -212,14 +200,11 @@ static void __devinit of_free_probes(const char **probes)
if (probes != part_probe_types_def)
kfree(probes);
}
-#endif
static struct of_device_id of_flash_match[];
static int __devinit of_flash_probe(struct platform_device *dev)
{
-#ifdef CONFIG_MTD_PARTITIONS
const char **part_probe_types;
-#endif
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
@@ -346,7 +331,6 @@ static int __devinit of_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
part_probe_types = of_get_probes(dp);
err = parse_mtd_partitions(info->cmtd, part_probe_types,
&info->parts, 0);
@@ -356,13 +340,11 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
of_free_probes(part_probe_types);
-#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {
err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
if (err < 0)
goto err_out;
}
-#endif
if (err == 0) {
err = parse_obsolete_partitions(dev, info, dp);
@@ -370,11 +352,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
goto err_out;
}
- if (err > 0)
- add_mtd_partitions(info->cmtd, info->parts, err);
- else
-#endif
- add_mtd_device(info->cmtd);
+ mtd_device_register(info->cmtd, info->parts, err);
kfree(mtd_list);
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 76a76be5a7bd..9ca1eccba4bc 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -94,14 +94,11 @@ static int platram_remove(struct platform_device *pdev)
return 0;
if (info->mtd) {
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_unregister(info->mtd);
if (info->partitions) {
- del_mtd_partitions(info->mtd);
if (info->free_partitions)
kfree(info->partitions);
}
-#endif
- del_mtd_device(info->mtd);
map_destroy(info->mtd);
}
@@ -231,7 +228,6 @@ static int platram_probe(struct platform_device *pdev)
/* check to see if there are any available partitions, or wether
* to add this device whole */
-#ifdef CONFIG_MTD_PARTITIONS
if (!pdata->nr_partitions) {
/* try to probe using the supplied probe type */
if (pdata->probes) {
@@ -239,24 +235,22 @@ static int platram_probe(struct platform_device *pdev)
&info->partitions, 0);
info->free_partitions = 1;
if (err > 0)
- err = add_mtd_partitions(info->mtd,
+ err = mtd_device_register(info->mtd,
info->partitions, err);
}
}
/* use the static mapping */
else
- err = add_mtd_partitions(info->mtd, pdata->partitions,
- pdata->nr_partitions);
-#endif /* CONFIG_MTD_PARTITIONS */
-
- if (add_mtd_device(info->mtd)) {
- dev_err(&pdev->dev, "add_mtd_device() failed\n");
- err = -ENOMEM;
- }
-
+ err = mtd_device_register(info->mtd, pdata->partitions,
+ pdata->nr_partitions);
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err)
+ dev_err(&pdev->dev, "failed to register the entire device\n");
+
return err;
exit_free:
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 64aea6acd48e..744ca5cacc9b 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -173,7 +173,7 @@ static int __init init_msp_flash(void)
msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
if (msp_flash[i]) {
msp_flash[i]->owner = THIS_MODULE;
- add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
+ mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
} else {
printk(KERN_ERR "map probe failed for flash\n");
ret = -ENXIO;
@@ -188,7 +188,7 @@ static int __init init_msp_flash(void)
cleanup_loop:
while (i--) {
- del_mtd_partitions(msp_flash[i]);
+ mtd_device_unregister(msp_flash[i]);
map_destroy(msp_flash[i]);
kfree(msp_maps[i].name);
iounmap(msp_maps[i].virt);
@@ -207,7 +207,7 @@ static void __exit cleanup_msp_flash(void)
int i;
for (i = 0; i < fcnt; i++) {
- del_mtd_partitions(msp_flash[i]);
+ mtd_device_unregister(msp_flash[i]);
map_destroy(msp_flash[i]);
iounmap((void *)msp_maps[i].virt);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d8ae634d347e..f59d62f74d44 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -104,23 +104,18 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
if (ret > 0) {
info->nr_parts = ret;
info->parts = parts;
}
-#endif
- if (info->nr_parts) {
- add_mtd_partitions(info->mtd, info->parts,
- info->nr_parts);
- } else {
+ if (!info->nr_parts)
printk("Registering %s as whole device\n",
info->map.name);
- add_mtd_device(info->mtd);
- }
+
+ mtd_device_register(info->mtd, info->parts, info->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
@@ -132,12 +127,7 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
-#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts)
- del_mtd_partitions(info->mtd);
- else
-#endif
- del_mtd_device(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
iounmap(info->map.virt);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 83ed64512c5e..761fb459d2c7 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,10 +25,8 @@
struct rbtx4939_flash_info {
struct mtd_info *mtd;
struct map_info map;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,28 +39,18 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
if (info->mtd) {
-#ifdef CONFIG_MTD_PARTITIONS
struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
- if (info->nr_parts) {
- del_mtd_partitions(info->mtd);
+ if (info->nr_parts)
kfree(info->parts);
- } else if (pdata->nr_parts)
- del_mtd_partitions(info->mtd);
- else
- del_mtd_device(info->mtd);
-#else
- del_mtd_device(info->mtd);
-#endif
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
return 0;
}
static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", NULL };
-#endif
static int rbtx4939_flash_probe(struct platform_device *dev)
{
@@ -120,23 +108,21 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(info->mtd, part_probe_types,
&info->parts, 0);
if (err > 0) {
- add_mtd_partitions(info->mtd, info->parts, err);
+ mtd_device_register(info->mtd, info->parts, err);
info->nr_parts = err;
return 0;
}
if (pdata->nr_parts) {
pr_notice("Using rbtx4939 partition information\n");
- add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
return 0;
}
-#endif
- add_mtd_device(info->mtd);
+ mtd_device_register(info->mtd, NULL, 0);
return 0;
err_out:
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 3e3ef53d4fd4..ed88225bf667 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -36,7 +36,7 @@ static int __init init_rpxlite(void)
mymtd = do_map_probe("cfi_probe", &rpxlite_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
+ mtd_device_register(mymtd, NULL, 0);
return 0;
}
@@ -47,7 +47,7 @@ static int __init init_rpxlite(void)
static void __exit cleanup_rpxlite(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (rpxlite_map.virt) {
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index da875908ea8e..a9b5e0e5c4c5 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -226,12 +226,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
int i;
if (info->mtd) {
- if (info->nr_parts == 0)
- del_mtd_device(info->mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- else
- del_mtd_partitions(info->mtd);
-#endif
+ mtd_device_unregister(info->mtd);
if (info->mtd != info->subdev[0].mtd)
mtd_concat_destroy(info->mtd);
}
@@ -363,28 +358,24 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
/*
* Partition selection stuff.
*/
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
if (nr_parts > 0) {
info->parts = parts;
part_type = "dynamic";
- } else
-#endif
- {
+ } else {
parts = plat->parts;
nr_parts = plat->nr_parts;
part_type = "static";
}
- if (nr_parts == 0) {
+ if (nr_parts == 0)
printk(KERN_NOTICE "SA1100 flash: no partition info "
"available, registering whole flash\n");
- add_mtd_device(info->mtd);
- } else {
+ else
printk(KERN_NOTICE "SA1100 flash: using %s partition "
"definition\n", part_type);
- add_mtd_partitions(info->mtd, parts, nr_parts);
- }
+
+ mtd_device_register(info->mtd, parts, nr_parts);
info->nr_parts = nr_parts;
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 04b2781fc627..556a2dfe94c5 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -182,7 +182,7 @@ static struct mtd_info *all_mtd;
static void cleanup_sbc_gxx(void)
{
if( all_mtd ) {
- del_mtd_partitions( all_mtd );
+ mtd_device_unregister(all_mtd);
map_destroy( all_mtd );
}
@@ -223,7 +223,7 @@ static int __init init_sbc_gxx(void)
all_mtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
- add_mtd_partitions(all_mtd, partition_info, NUM_PARTITIONS );
+ mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
return 0;
}
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 4d8aaaf4bb76..8fead8e46bce 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -266,10 +266,10 @@ static int __init init_sc520cdp(void)
/* Combine the two flash banks into a single MTD device & register it: */
merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
if(merged_mtd)
- add_mtd_device(merged_mtd);
+ mtd_device_register(merged_mtd, NULL, 0);
}
if(devices_found == 3) /* register the third (DIL-Flash) device */
- add_mtd_device(mymtd[2]);
+ mtd_device_register(mymtd[2], NULL, 0);
return(devices_found ? 0 : -ENXIO);
}
@@ -278,11 +278,11 @@ static void __exit cleanup_sc520cdp(void)
int i;
if (merged_mtd) {
- del_mtd_device(merged_mtd);
+ mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd[2])
- del_mtd_device(mymtd[2]);
+ mtd_device_unregister(mymtd[2]);
for (i = 0; i < NUM_FLASH_BANKS; i++) {
if (mymtd[i])
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 7e329f09a548..d88c8426bb0f 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -180,7 +180,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
scb2_mtd->owner = THIS_MODULE;
if (scb2_fixup_mtd(scb2_mtd) < 0) {
- del_mtd_device(scb2_mtd);
+ mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
if (!region_fail)
@@ -192,7 +192,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
(unsigned long long)scb2_mtd->size,
(unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
- add_mtd_device(scb2_mtd);
+ mtd_device_register(scb2_mtd, NULL, 0);
return 0;
}
@@ -207,7 +207,7 @@ scb2_flash_remove(struct pci_dev *dev)
if (scb2_mtd->lock)
scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
- del_mtd_device(scb2_mtd);
+ mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 027e628a4f1d..f1c1f737d0d7 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -44,7 +44,6 @@ static struct resource docmem = {
static struct mtd_info *mymtd;
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition partition_info[] = {
{
.name = "DOCCS Boot kernel",
@@ -68,8 +67,6 @@ static struct mtd_partition partition_info[] = {
},
};
#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-#endif
-
static struct map_info scx200_docflash_map = {
.name = "NatSemi SCx200 DOCCS Flash",
@@ -198,24 +195,17 @@ static int __init init_scx200_docflash(void)
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
partition_info[3].offset = mymtd->size-partition_info[3].size;
partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
-#else
- add_mtd_device(mymtd);
-#endif
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
+
return 0;
}
static void __exit cleanup_scx200_docflash(void)
{
if (mymtd) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mymtd);
-#else
- del_mtd_device(mymtd);
-#endif
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (scx200_docflash_map.virt) {
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 0eb41d9c6786..cbf6bade9354 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -89,7 +89,7 @@ static int __init init_soleng_maps(void)
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
if (eprom_mtd) {
eprom_mtd->owner = THIS_MODULE;
- add_mtd_device(eprom_mtd);
+ mtd_device_register(eprom_mtd, NULL, 0);
}
nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
@@ -104,9 +104,9 @@ static int __init init_soleng_maps(void)
#endif /* CONFIG_MTD_SUPERH_RESERVE */
if (nr_parts > 0)
- add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
+ mtd_device_register(flash_mtd, parsed_parts, nr_parts);
else
- add_mtd_device(flash_mtd);
+ mtd_device_register(flash_mtd, NULL, 0);
return 0;
}
@@ -114,14 +114,14 @@ static int __init init_soleng_maps(void)
static void __exit cleanup_soleng_maps(void)
{
if (eprom_mtd) {
- del_mtd_device(eprom_mtd);
+ mtd_device_unregister(eprom_mtd);
map_destroy(eprom_mtd);
}
if (parsed_parts)
- del_mtd_partitions(flash_mtd);
+ mtd_device_unregister(flash_mtd);
else
- del_mtd_device(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
}
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3f1cb328a574..2d66234f57cb 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -101,7 +101,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
up->mtd->owner = THIS_MODULE;
- add_mtd_device(up->mtd);
+ mtd_device_register(up->mtd, NULL, 0);
dev_set_drvdata(&op->dev, up);
@@ -126,7 +126,7 @@ static int __devexit uflash_remove(struct platform_device *op)
struct uflash_dev *up = dev_get_drvdata(&op->dev);
if (up->mtd) {
- del_mtd_device(up->mtd);
+ mtd_device_unregister(up->mtd);
map_destroy(up->mtd);
}
if (up->map.virt) {
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 0718dfb3ee64..d78587990e7e 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -62,7 +62,6 @@ static void __iomem *start_scan_addr;
* "struct map_desc *_io_desc" for the corresponding machine.
*/
-#ifdef CONFIG_MTD_PARTITIONS
/* Currently, TQM8xxL has up to 8MiB flash */
static unsigned long tqm8xxl_max_flash_size = 0x00800000;
@@ -107,7 +106,6 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
//.size = MTDPART_SIZ_FULL,
}
};
-#endif
static int __init init_tqm_mtd(void)
{
@@ -188,7 +186,6 @@ static int __init init_tqm_mtd(void)
goto error_mem;
}
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Select Static partition definitions
*/
@@ -201,21 +198,14 @@ static int __init init_tqm_mtd(void)
part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
for(idx = 0; idx < num_banks ; idx++) {
- if (part_banks[idx].nums == 0) {
+ if (part_banks[idx].nums == 0)
printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
- add_mtd_device(mtd_banks[idx]);
- } else {
+ else
printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
idx, part_banks[idx].type);
- add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part,
- part_banks[idx].nums);
- }
+ mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
+ part_banks[idx].nums);
}
-#else
- printk(KERN_NOTICE "TQM flash: registering %d whole flash banks at once\n", num_banks);
- for(idx = 0 ; idx < num_banks ; idx++)
- add_mtd_device(mtd_banks[idx]);
-#endif
return 0;
error_mem:
for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
@@ -237,7 +227,7 @@ static void __exit cleanup_tqm_mtd(void)
for(idx = 0 ; idx < num_banks ; idx++) {
/* destroy mtd_info previously allocated */
if (mtd_banks[idx]) {
- del_mtd_partitions(mtd_banks[idx]);
+ mtd_device_unregister(mtd_banks[idx]);
map_destroy(mtd_banks[idx]);
}
/* release map_info not used anymore */
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index e02dfa9d4ddd..d1d671daf235 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -89,7 +89,7 @@ static int __init init_ts5500_map(void)
}
mymtd->owner = THIS_MODULE;
- add_mtd_partitions(mymtd, ts5500_partitions, NUM_PARTITIONS);
+ mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
return 0;
@@ -102,7 +102,7 @@ err2:
static void __exit cleanup_ts5500_map(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 77a8bfc02577..1de390e1c2fb 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -76,7 +76,7 @@ static void __exit cleanup_tsunami_flash(void)
struct mtd_info *mtd;
mtd = tsunami_flash_mtd;
if (mtd) {
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
map_destroy(mtd);
}
tsunami_flash_mtd = 0;
@@ -97,7 +97,7 @@ static int __init init_tsunami_flash(void)
}
if (tsunami_flash_mtd) {
tsunami_flash_mtd->owner = THIS_MODULE;
- add_mtd_device(tsunami_flash_mtd);
+ mtd_device_register(tsunami_flash_mtd, NULL, 0);
return 0;
}
return -ENXIO;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 35009294b435..6793074f3f40 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -89,11 +89,7 @@ static int __init uclinux_mtd_init(void)
mtd->priv = mapp;
uclinux_ram_mtdinfo = mtd;
-#ifdef CONFIG_MTD_PARTITIONS
- add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
-#else
- add_mtd_device(mtd);
-#endif
+ mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
return(0);
}
@@ -103,11 +99,7 @@ static int __init uclinux_mtd_init(void)
static void __exit uclinux_mtd_cleanup(void)
{
if (uclinux_ram_mtdinfo) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(uclinux_ram_mtdinfo);
-#else
- del_mtd_device(uclinux_ram_mtdinfo);
-#endif
+ mtd_device_unregister(uclinux_ram_mtdinfo);
map_destroy(uclinux_ram_mtdinfo);
uclinux_ram_mtdinfo = NULL;
}
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 6adaa6acc193..5e68de73eabc 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -138,7 +138,7 @@ static void __exit cleanup_vmax301(void)
for (i=0; i<2; i++) {
if (vmax_mtd[i]) {
- del_mtd_device(vmax_mtd[i]);
+ mtd_device_unregister(vmax_mtd[i]);
map_destroy(vmax_mtd[i]);
}
}
@@ -176,7 +176,7 @@ static int __init init_vmax301(void)
vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
if (vmax_mtd[i]) {
vmax_mtd[i]->owner = THIS_MODULE;
- add_mtd_device(vmax_mtd[i]);
+ mtd_device_register(vmax_mtd[i], NULL, 0);
}
}
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 4afc167731ef..3a04b078576a 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -563,7 +563,7 @@ static void vmu_queryblocks(struct mapleq *mq)
goto fail_cache_create;
part_cur->pcache = pcache;
- error = add_mtd_device(mtd_cur);
+ error = mtd_device_register(mtd_cur, NULL, 0);
if (error)
goto fail_mtd_register;
@@ -709,7 +709,7 @@ static void __devexit vmu_disconnect(struct maple_device *mdev)
for (x = 0; x < card->partitions; x++) {
mpart = ((card->mtd)[x]).priv;
mpart->mdev = NULL;
- del_mtd_device(&((card->mtd)[x]));
+ mtd_device_unregister(&((card->mtd)[x]));
kfree(((card->parts)[x]).name);
}
kfree(card->parts);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 933a2b6598b4..901ce968efae 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -132,17 +132,20 @@ static int __init init_sbc82xx_flash(void)
nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
&sbcmtd_parts[i], 0);
if (nr_parts > 0) {
- add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts);
+ mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
+ nr_parts);
continue;
}
/* No partitioning detected. Use default */
if (i == 2) {
- add_mtd_device(sbcmtd[i]);
+ mtd_device_register(sbcmtd[i], NULL, 0);
} else if (i == bigflash) {
- add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts));
+ mtd_device_register(sbcmtd[i], bigflash_parts,
+ ARRAY_SIZE(bigflash_parts));
} else {
- add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts));
+ mtd_device_register(sbcmtd[i], smallflash_parts,
+ ARRAY_SIZE(smallflash_parts));
}
}
return 0;
@@ -157,9 +160,9 @@ static void __exit cleanup_sbc82xx_flash(void)
continue;
if (i<2 || sbcmtd_parts[i])
- del_mtd_partitions(sbcmtd[i]);
+ mtd_device_unregister(sbcmtd[i]);
else
- del_mtd_device(sbcmtd[i]);
+ mtd_device_unregister(sbcmtd[i]);
kfree(sbcmtd_parts[i]);
map_destroy(sbcmtd[i]);
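
The map-driver conversions above all collapse to the same pattern: the CONFIG_MTD_PARTITIONS #ifdef and the add_mtd_partitions()/add_mtd_device() split are replaced by a single mtd_device_register() call, and teardown by mtd_device_unregister(). A minimal sketch of the resulting probe/remove shape (driver and struct names such as example_info are illustrative, not from this patch):

/* Illustrative only -- not part of this patch. */
static const char *probes[] = { "cmdlinepart", NULL };

static int example_flash_probe(struct platform_device *pdev)
{
	struct example_info *info = platform_get_drvdata(pdev);
	struct mtd_partition *parts = NULL;
	int nr_parts;

	nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
	if (nr_parts <= 0) {		/* no parser hit: register whole device */
		parts = NULL;
		nr_parts = 0;
	}
	/* one call covers both the partitioned and the whole-device case */
	return mtd_device_register(info->mtd, parts, nr_parts);
}

static int example_flash_remove(struct platform_device *pdev)
{
	struct example_info *info = platform_get_drvdata(pdev);

	mtd_device_unregister(info->mtd);	/* drops partitions and master */
	map_destroy(info->mtd);
	return 0;
}
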
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a534e1f0c348..ca385697446e 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -221,15 +221,33 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
kref_get(&dev->ref);
__module_get(dev->tr->owner);
- if (dev->mtd) {
- ret = dev->tr->open ? dev->tr->open(dev) : 0;
- __get_mtd_device(dev->mtd);
+ if (!dev->mtd)
+ goto unlock;
+
+ if (dev->tr->open) {
+ ret = dev->tr->open(dev);
+ if (ret)
+ goto error_put;
}
+ ret = __get_mtd_device(dev->mtd);
+ if (ret)
+ goto error_release;
+
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
+
+error_release:
+ if (dev->tr->release)
+ dev->tr->release(dev);
+error_put:
+ module_put(dev->tr->owner);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4c36ef66a46b..3f92731a5b9e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -166,10 +166,23 @@ static int mtd_close(struct inode *inode, struct file *file)
return 0;
} /* mtd_close */
-/* FIXME: This _really_ needs to die. In 2.5, we should lock the
- userspace buffer down and use it directly with readv/writev.
-*/
-#define MAX_KMALLOC_SIZE 0x20000
+/* Back in June 2001, dwmw2 wrote:
+ *
+ * FIXME: This _really_ needs to die. In 2.5, we should lock the
+ * userspace buffer down and use it directly with readv/writev.
+ *
+ * The implementation below, using mtd_kmalloc_up_to, mitigates
+ * allocation failures when the system is low on memory or memory is
+ * highly fragmented, at the cost of reducing the performance of the
+ * requested transfer due to a smaller buffer size.
+ *
+ * A more complex but more memory-efficient implementation based on
+ * get_user_pages and iovecs to cover extents of those pages is a
+ * longer-term goal, as intimated by dwmw2 above. However, for the
+ * write case, this requires yet more complex head and tail transfer
+ * handling when those head and tail offsets and sizes are such that
+ * alignment requirements are not met in the NAND subdriver.
+ */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
@@ -179,6 +192,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
size_t total_retlen=0;
int ret=0;
int len;
+ size_t size = count;
char *kbuf;
DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
@@ -189,23 +203,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
if (!count)
return 0;
- /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
- and pass them directly to the MTD functions */
-
- if (count > MAX_KMALLOC_SIZE)
- kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
- else
- kbuf=kmalloc(count, GFP_KERNEL);
-
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
-
- if (count > MAX_KMALLOC_SIZE)
- len = MAX_KMALLOC_SIZE;
- else
- len = count;
+ len = min_t(size_t, count, size);
switch (mfi->mode) {
case MTD_MODE_OTP_FACTORY:
@@ -268,6 +271,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ size_t size = count;
char *kbuf;
size_t retlen;
size_t total_retlen=0;
@@ -285,20 +289,12 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
if (!count)
return 0;
- if (count > MAX_KMALLOC_SIZE)
- kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
- else
- kbuf=kmalloc(count, GFP_KERNEL);
-
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
-
- if (count > MAX_KMALLOC_SIZE)
- len = MAX_KMALLOC_SIZE;
- else
- len = count;
+ len = min_t(size_t, count, size);
if (copy_from_user(kbuf, buf, len)) {
kfree(kbuf);
@@ -512,7 +508,6 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
return 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
static int mtd_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
@@ -548,8 +543,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
return -EINVAL;
}
}
-#endif
-
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
@@ -941,7 +934,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
-#ifdef CONFIG_MTD_PARTITIONS
case BLKPG:
{
ret = mtd_blkpg_ioctl(mtd,
@@ -955,7 +947,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
ret = 0;
break;
}
-#endif
default:
ret = -ENOTTY;
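
The mtdchar read/write paths above replace the fixed MAX_KMALLOC_SIZE buffer with mtd_kmalloc_up_to(), which may hand back a buffer smaller than requested; the caller then transfers in chunks no larger than what was actually allocated. A condensed sketch of that pattern, with error handling and the mode switch trimmed:

	size_t size = count;
	char *kbuf;

	kbuf = mtd_kmalloc_up_to(mtd, &size);	/* size updated to the size obtained */
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		int len = min_t(size_t, count, size);	/* never exceed the buffer we got */

		/* ... transfer 'len' bytes through kbuf ... */

		count -= len;
	}
	kfree(kbuf);
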
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 5060e608ea5d..e601672a5305 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- ops->retlen = 0;
+ ops->retlen = ops->oobretlen = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
@@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
devops.len = subdev->size - to;
err = subdev->write_oob(subdev, to, &devops);
- ops->retlen += devops.retlen;
+ ops->retlen += devops.oobretlen;
if (err)
return err;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index da69bc8a5a7d..c510aff289a8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
@@ -37,6 +38,7 @@
#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include "mtdcore.h"
/*
@@ -391,7 +393,7 @@ fail_locked:
* if the requested device does not appear to be present in the list.
*/
-int del_mtd_device (struct mtd_info *mtd)
+int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
@@ -427,6 +429,50 @@ out_error:
}
/**
+ * mtd_device_register - register an MTD device.
+ *
+ * @master: the MTD device to register
+ * @parts: the partitions to register - only valid if nr_parts > 0
+ * @nr_parts: the number of partitions in parts. If zero then the full MTD
+ * device is registered
+ *
+ * Register an MTD device with the system and, optionally, a number of
+ * partitions. If nr_parts is 0 then the whole device is registered, otherwise
+ * only the partitions are registered. To register both the full device *and*
+ * the partitions, call mtd_device_register() twice, once with nr_parts == 0
+ * and once with nr_parts set to the number of partitions.
+ */
+int mtd_device_register(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nr_parts)
+{
+ return parts ? add_mtd_partitions(master, parts, nr_parts) :
+ add_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_register);
+
+/**
+ * mtd_device_unregister - unregister an existing MTD device.
+ *
+ * @master: the MTD device to unregister. This will unregister both the master
+ * and any partitions if registered.
+ */
+int mtd_device_unregister(struct mtd_info *master)
+{
+ int err;
+
+ err = del_mtd_partitions(master);
+ if (err)
+ return err;
+
+ if (!device_is_registered(&master->dev))
+ return 0;
+
+ return del_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_unregister);
+
+/**
* register_mtd_user - register a 'user' of MTD devices.
* @new: pointer to notifier info structure
*
@@ -443,7 +489,7 @@ void register_mtd_user (struct mtd_notifier *new)
list_add(&new->list, &mtd_notifiers);
- __module_get(THIS_MODULE);
+ __module_get(THIS_MODULE);
mtd_for_each_device(mtd)
new->add(mtd);
@@ -532,7 +578,6 @@ int __get_mtd_device(struct mtd_info *mtd)
return -ENODEV;
if (mtd->get_device) {
-
err = mtd->get_device(mtd);
if (err) {
@@ -570,21 +615,13 @@ struct mtd_info *get_mtd_device_nm(const char *name)
if (!mtd)
goto out_unlock;
- if (!try_module_get(mtd->owner))
+ err = __get_mtd_device(mtd);
+ if (err)
goto out_unlock;
- if (mtd->get_device) {
- err = mtd->get_device(mtd);
- if (err)
- goto out_put;
- }
-
- mtd->usecount++;
mutex_unlock(&mtd_table_mutex);
return mtd;
-out_put:
- module_put(mtd->owner);
out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
@@ -638,8 +675,54 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
return ret;
}
-EXPORT_SYMBOL_GPL(add_mtd_device);
-EXPORT_SYMBOL_GPL(del_mtd_device);
+/**
+ * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
+ * @mtd: the MTD device the buffer is intended for; the allocation is kept
+ *       aligned to its writesize (min. I/O unit)
+ * @size: A pointer to the ideal or maximum size of the allocation. Points
+ *        to the actual allocation size on success.
+ *
+ * This routine attempts to allocate a contiguous kernel buffer up to
+ * the specified size, backing off the size of the request exponentially
+ * until the request succeeds or until the allocation size falls below
+ * the system page size. This attempts to make sure it does not adversely
+ * impact system performance, so when allocating more than one page, we
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
+ *
+ * Note, this function also makes sure that the allocated buffer is aligned to
+ * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
+ *
+ * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
+ * to handle smaller (i.e. degraded) buffer allocations in low-memory or
+ * fragmented-memory situations, where such reduced allocations, from a
+ * requested ideal, are allowed.
+ *
+ * Returns a pointer to the allocated buffer on success; otherwise, NULL.
+ */
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
+{
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+ __GFP_NORETRY | __GFP_NO_KSWAPD;
+ size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
+ void *kbuf;
+
+ *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
+
+ while (*size > min_alloc) {
+ kbuf = kmalloc(*size, flags);
+ if (kbuf)
+ return kbuf;
+
+ *size >>= 1;
+ *size = ALIGN(*size, mtd->writesize);
+ }
+
+ /*
+ * For the last-resort allocation, allow 'kmalloc()' to do all sorts of
+ * things (write-back, dropping caches, etc.) by using GFP_KERNEL.
+ */
+ return kmalloc(*size, GFP_KERNEL);
+}
+
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -648,6 +731,7 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
+EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS
@@ -656,44 +740,32 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
-static inline int mtd_proc_info(char *buf, struct mtd_info *this)
-{
- return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
- (unsigned long long)this->size,
- this->erasesize, this->name);
-}
-
-static int mtd_read_proc (char *page, char **start, off_t off, int count,
- int *eof, void *data_unused)
+static int mtd_proc_show(struct seq_file *m, void *v)
{
struct mtd_info *mtd;
- int len, l;
- off_t begin = 0;
+ seq_puts(m, "dev: size erasesize name\n");
mutex_lock(&mtd_table_mutex);
-
- len = sprintf(page, "dev: size erasesize name\n");
mtd_for_each_device(mtd) {
- l = mtd_proc_info(page + len, mtd);
- len += l;
- if (len+begin > off+count)
- goto done;
- if (len+begin < off) {
- begin += len;
- len = 0;
- }
- }
-
- *eof = 1;
-
-done:
+ seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
+ mtd->index, (unsigned long long)mtd->size,
+ mtd->erasesize, mtd->name);
+ }
mutex_unlock(&mtd_table_mutex);
- if (off >= len+begin)
- return 0;
- *start = page + (off-begin);
- return ((count < begin+len-off) ? count : begin+len-off);
+ return 0;
+}
+
+static int mtd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtd_proc_show, NULL);
}
+static const struct file_operations mtd_proc_ops = {
+ .open = mtd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
@@ -734,8 +806,7 @@ static int __init init_mtd(void)
goto err_bdi3;
#ifdef CONFIG_PROC_FS
- if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
- proc_mtd->read_proc = mtd_read_proc;
+ proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
#endif /* CONFIG_PROC_FS */
return 0;
@@ -753,7 +824,7 @@ err_reg:
static void __exit cleanup_mtd(void)
{
#ifdef CONFIG_PROC_FS
- if (proc_mtd)
+ if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
class_unregister(&mtd_class);
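
As the mtd_device_register() kernel-doc earlier in this hunk notes, passing a NULL partition array registers the master device itself, while passing a partition array registers only the partitions; a driver that wants both simply calls the helper twice, which is what cafe_nand.c does later in this patch. A minimal sketch, assuming 'parts'/'nr_parts' come from a partition parser:

	/* register the whole chip as its own MTD device ... */
	err = mtd_device_register(mtd, NULL, 0);
	if (err)
		return err;

	/* ... and, if a parser found any, the partitions on top of it */
	if (nr_parts > 0)
		err = mtd_device_register(mtd, parts, nr_parts);
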
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 6a64fdebc898..0ed6126b4c1f 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -10,6 +10,12 @@
extern struct mutex mtd_table_mutex;
extern struct mtd_info *__mtd_next_device(int i);
+extern int add_mtd_device(struct mtd_info *mtd);
+extern int del_mtd_device(struct mtd_info *mtd);
+extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
+ int);
+extern int del_mtd_partitions(struct mtd_info *);
+
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
(mtd) != NULL; \
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 0a4760174782..630be3e7da04 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -31,6 +31,8 @@
#include <linux/mtd/partitions.h>
#include <linux/err.h>
+#include "mtdcore.h"
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -376,7 +378,6 @@ int del_mtd_partitions(struct mtd_info *master)
return err;
}
-EXPORT_SYMBOL(del_mtd_partitions);
static struct mtd_part *allocate_partition(struct mtd_info *master,
const struct mtd_partition *part, int partno,
@@ -671,7 +672,6 @@ int add_mtd_partitions(struct mtd_info *master,
return 0;
}
-EXPORT_SYMBOL(add_mtd_partitions);
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
@@ -722,11 +722,8 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
parser = get_partition_parser(*types);
- if (!parser) {
- printk(KERN_NOTICE "%s partition parsing not available\n",
- *types);
+ if (!parser)
continue;
- }
ret = (*parser->parse_fn)(master, pparts, origin);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fed215c4cfa1..fd7885327611 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1450,7 +1450,13 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
}
oinfo = mtd->ecclayout;
- if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
+ if (!oinfo) {
+ printk(KERN_ERR "%s: mtd%d does not have OOB\n",
+ MTDSWAP_PREFIX, mtd->index);
+ return;
+ }
+
+ if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
"%d available, %zu needed.\n",
MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index edec457d361d..4c3425235adc 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -92,7 +92,7 @@ config MTD_NAND_EDB7312
config MTD_NAND_H1900
tristate "iPAQ H1900 flash"
- depends on ARCH_PXA && MTD_PARTITIONS
+ depends on ARCH_PXA
help
This enables the driver for the iPAQ h1900 flash.
@@ -419,7 +419,6 @@ config MTD_NAND_TMIO
config MTD_NAND_NANDSIM
tristate "Support for NAND Flash Simulator"
- depends on MTD_PARTITIONS
help
The simulator may simulate various NAND flash chips for the
MTD nand layer.
@@ -513,7 +512,7 @@ config MTD_NAND_SOCRATES
config MTD_NAND_NUC900
tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
- depends on ARCH_W90X900 && MTD_PARTITIONS
+ depends on ARCH_W90X900
help
This enables the driver for the NAND Flash on evaluation board based
on w90p910 / NUC9xx.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 8691e0482ed2..eb40ea829ab2 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -120,7 +120,7 @@ static void alauda_delete(struct kref *kref)
struct alauda *al = container_of(kref, struct alauda, kref);
if (al->mtd) {
- del_mtd_device(al->mtd);
+ mtd_device_unregister(al->mtd);
kfree(al->mtd);
}
usb_put_dev(al->dev);
@@ -592,7 +592,7 @@ static int alauda_init_media(struct alauda *al)
mtd->priv = al;
mtd->owner = THIS_MODULE;
- err = add_mtd_device(mtd);
+ err = mtd_device_register(mtd, NULL, 0);
if (err) {
err = -ENFILE;
goto error;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index bc65bf71e1a2..78017eb9318e 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -235,8 +235,8 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
}
/* Register the partitions */
- add_mtd_partitions(ams_delta_mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ mtd_device_register(ams_delta_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
goto out;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 950646aa4c4b..b300705d41cb 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -30,6 +30,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -494,11 +495,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
struct resource *regs;
struct resource *mem;
int res;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_partitions = 0;
-#endif
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -656,7 +654,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_scan_tail;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "atmel_nand";
num_partitions = parse_mtd_partitions(mtd, part_probes,
@@ -672,17 +669,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_no_partitions;
}
- res = add_mtd_partitions(mtd, partitions, num_partitions);
-#else
- res = add_mtd_device(mtd);
-#endif
-
+ res = mtd_device_register(mtd, partitions, num_partitions);
if (!res)
return res;
-#ifdef CONFIG_MTD_PARTITIONS
err_no_partitions:
-#endif
nand_release(mtd);
err_scan_tail:
err_scan_ident:
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5d513b54a7d7..e7767eef4505 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -581,7 +581,8 @@ static int __init au1xxx_nand_init(void)
}
/* Register the partitions */
- add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info));
+ mtd_device_register(au1550_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
return 0;
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index 0911cf03db80..eddc9a224985 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -185,20 +185,20 @@ static int __init autcpu12_init(void)
/* Register the partitions */
switch (autcpu12_mtd->size) {
case SZ_16M:
- add_mtd_partitions(autcpu12_mtd, partition_info16k,
- NUM_PARTITIONS16K);
+ mtd_device_register(autcpu12_mtd, partition_info16k,
+ NUM_PARTITIONS16K);
break;
case SZ_32M:
- add_mtd_partitions(autcpu12_mtd, partition_info32k,
- NUM_PARTITIONS32K);
+ mtd_device_register(autcpu12_mtd, partition_info32k,
+ NUM_PARTITIONS32K);
break;
case SZ_64M:
- add_mtd_partitions(autcpu12_mtd, partition_info64k,
- NUM_PARTITIONS64K);
+ mtd_device_register(autcpu12_mtd, partition_info64k,
+ NUM_PARTITIONS64K);
break;
case SZ_128M:
- add_mtd_partitions(autcpu12_mtd, partition_info128k,
- NUM_PARTITIONS128K);
+ mtd_device_register(autcpu12_mtd, partition_info128k,
+ NUM_PARTITIONS128K);
break;
default:
printk("Unsupported SmartMedia device\n");
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index dfe262c726fb..9ec280738a9a 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,9 +52,7 @@
static const __devinitconst char gBanner[] = KERN_INFO \
"BCM UMI MTD NAND Driver: 1.00\n";
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
#if NAND_ECC_BCH
static uint8_t scan_ff_pattern[] = { 0xff };
@@ -509,7 +507,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
kfree(board_mtd);
return -EIO;
}
- add_mtd_partitions(board_mtd, partition_info, nr_partitions);
+ mtd_device_register(board_mtd, partition_info, nr_partitions);
}
/* Return happy */
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 79947bea4d57..dd899cb5d366 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -659,15 +659,10 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
struct mtd_info *mtd = &info->mtd;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts = info->platform->partitions;
int nr = info->platform->nr_partitions;
- return add_mtd_partitions(mtd, parts, nr);
-#else
- return add_mtd_device(mtd);
-#endif
+ return mtd_device_register(mtd, parts, nr);
}
static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e06c8983978e..87ebb4e5b0c3 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -90,9 +90,7 @@ static unsigned int numtimings;
static int timing[3];
module_param_array(timing, int, &numtimings, 0644);
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
/* Hrm. Why isn't this already conditional on something in the struct device? */
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -632,10 +630,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
int nr_parts;
-#endif
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -804,9 +800,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mtd);
/* We register the whole device first, separate from the partitions */
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "cafe_nand";
#endif
@@ -814,9 +809,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
if (nr_parts > 0) {
cafe->parts = parts;
dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
- add_mtd_partitions(mtd, parts, nr_parts);
+ mtd_device_register(mtd, parts, nr_parts);
}
-#endif
goto out;
out_irq:
@@ -838,7 +832,6 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct cafe_priv *cafe = mtd->priv;
- del_mtd_device(mtd);
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6e6495278258..6fc043a30d1e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -238,7 +238,7 @@ static int __init cmx270_init(void)
/* Register the partitions */
pr_notice("Using %s partition definition\n", part_type);
- ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
+ ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 71c35a0b9826..f59ad1f2d5db 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -277,22 +277,15 @@ static int is_geode(void)
return 0;
}
-
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
-
static int __init cs553x_init(void)
{
int err = -ENXIO;
int i;
uint64_t val;
-
-#ifdef CONFIG_MTD_PARTITIONS
int mtd_parts_nb = 0;
struct mtd_partition *mtd_parts = NULL;
-#endif
/* If the CPU isn't a Geode GX or LX, abort */
if (!is_geode())
@@ -324,17 +317,11 @@ static int __init cs553x_init(void)
if (cs553x_mtd[i]) {
/* If any devices registered, return success. Else the last error. */
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0) {
+ if (mtd_parts_nb > 0)
printk(KERN_NOTICE "Using command line partition definition\n");
- add_mtd_partitions(cs553x_mtd[i], mtd_parts, mtd_parts_nb);
- } else {
- add_mtd_device(cs553x_mtd[i]);
- }
-#else
- add_mtd_device(cs553x_mtd[i]);
-#endif
+ mtd_device_register(cs553x_mtd[i], mtd_parts,
+ mtd_parts_nb);
err = 0;
}
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index aff3468867ac..1f34951ae1a7 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -530,6 +530,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
int ret;
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ struct mtd_partition *mtd_parts = NULL;
+ int mtd_parts_nb = 0;
/* insist on board-specific configuration */
if (!pdata)
@@ -749,41 +751,33 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- if (mtd_has_partitions()) {
- struct mtd_partition *mtd_parts = NULL;
- int mtd_parts_nb = 0;
+ if (mtd_has_cmdlinepart()) {
+ static const char *probes[] __initconst = {
+ "cmdlinepart", NULL
+ };
- if (mtd_has_cmdlinepart()) {
- static const char *probes[] __initconst =
- { "cmdlinepart", NULL };
-
- mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
- &mtd_parts, 0);
- }
-
- if (mtd_parts_nb <= 0) {
- mtd_parts = pdata->parts;
- mtd_parts_nb = pdata->nr_parts;
- }
+ mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
+ &mtd_parts, 0);
+ }
- /* Register any partitions */
- if (mtd_parts_nb > 0) {
- ret = add_mtd_partitions(&info->mtd,
- mtd_parts, mtd_parts_nb);
- if (ret == 0)
- info->partitioned = true;
- }
+ if (mtd_parts_nb <= 0) {
+ mtd_parts = pdata->parts;
+ mtd_parts_nb = pdata->nr_parts;
+ }
- } else if (pdata->nr_parts) {
- dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
- pdata->nr_parts, info->mtd.name);
+ /* Register any partitions */
+ if (mtd_parts_nb > 0) {
+ ret = mtd_device_register(&info->mtd, mtd_parts,
+ mtd_parts_nb);
+ if (ret == 0)
+ info->partitioned = true;
}
/* If there's no partition info, just package the whole chip
* as a single MTD device.
*/
if (!info->partitioned)
- ret = add_mtd_device(&info->mtd) ? -ENODEV : 0;
+ ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
if (ret < 0)
goto err_scan;
@@ -824,10 +818,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
struct davinci_nand_info *info = platform_get_drvdata(pdev);
int status;
- if (mtd_has_partitions() && info->partitioned)
- status = del_mtd_partitions(&info->mtd);
- else
- status = del_mtd_device(&info->mtd);
+ status = mtd_device_unregister(&info->mtd);
spin_lock_irq(&davinci_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 4633f094c510..d5276218945f 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -44,16 +45,16 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
/* We define a macro here that combines all interrupts this driver uses into
* a single constant value, for convenience. */
-#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \
- INTR_STATUS0__ECC_TRANSACTION_DONE | \
- INTR_STATUS0__ECC_ERR | \
- INTR_STATUS0__PROGRAM_FAIL | \
- INTR_STATUS0__LOAD_COMP | \
- INTR_STATUS0__PROGRAM_COMP | \
- INTR_STATUS0__TIME_OUT | \
- INTR_STATUS0__ERASE_FAIL | \
- INTR_STATUS0__RST_COMP | \
- INTR_STATUS0__ERASE_COMP)
+#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
+ INTR_STATUS__ECC_TRANSACTION_DONE | \
+ INTR_STATUS__ECC_ERR | \
+ INTR_STATUS__PROGRAM_FAIL | \
+ INTR_STATUS__LOAD_COMP | \
+ INTR_STATUS__PROGRAM_COMP | \
+ INTR_STATUS__TIME_OUT | \
+ INTR_STATUS__ERASE_FAIL | \
+ INTR_STATUS__RST_COMP | \
+ INTR_STATUS__ERASE_COMP)
/* indicates whether or not the internal value for the flash bank is
* valid or not */
@@ -95,30 +96,6 @@ static const struct pci_device_id denali_pci_ids[] = {
{ /* end: all zeroes */ }
};
-
-/* these are static lookup tables that give us easy access to
- * registers in the NAND controller.
- */
-static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1,
- INTR_STATUS2,
- INTR_STATUS3};
-
-static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
- DEVICE_RESET__BANK1,
- DEVICE_RESET__BANK2,
- DEVICE_RESET__BANK3};
-
-static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
- INTR_STATUS1__TIME_OUT,
- INTR_STATUS2__TIME_OUT,
- INTR_STATUS3__TIME_OUT};
-
-static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
- INTR_STATUS1__RST_COMP,
- INTR_STATUS2__RST_COMP,
- INTR_STATUS3__RST_COMP};
-
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -180,19 +157,17 @@ static void read_status(struct denali_nand_info *denali)
static void reset_bank(struct denali_nand_info *denali)
{
uint32_t irq_status = 0;
- uint32_t irq_mask = reset_complete[denali->flash_bank] |
- operation_timeout[denali->flash_bank];
- int bank = 0;
+ uint32_t irq_mask = INTR_STATUS__RST_COMP |
+ INTR_STATUS__TIME_OUT;
clear_interrupts(denali);
- bank = device_reset_banks[denali->flash_bank];
- iowrite32(bank, denali->flash_reg + DEVICE_RESET);
+ iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
irq_status = wait_for_irq(denali, irq_mask);
- if (irq_status & operation_timeout[denali->flash_bank])
- dev_err(&denali->dev->dev, "reset bank failed.\n");
+ if (irq_status & INTR_STATUS__TIME_OUT)
+ dev_err(denali->dev, "reset bank failed.\n");
}
/* Reset the flash controller */
@@ -200,29 +175,28 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
uint32_t i;
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(reset_complete[i] | operation_timeout[i],
- denali->flash_reg + intr_status_addresses[i]);
+ for (i = 0 ; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
- iowrite32(device_reset_banks[i],
- denali->flash_reg + DEVICE_RESET);
+ for (i = 0 ; i < denali->max_banks; i++) {
+ iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
while (!(ioread32(denali->flash_reg +
- intr_status_addresses[i]) &
- (reset_complete[i] | operation_timeout[i])))
+ INTR_STATUS(i)) &
+ (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
cpu_relax();
- if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
- operation_timeout[i])
- dev_dbg(&denali->dev->dev,
+ if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
+ INTR_STATUS__TIME_OUT)
+ dev_dbg(denali->dev,
"NAND Reset operation timed out on bank %d\n", i);
}
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(reset_complete[i] | operation_timeout[i],
- denali->flash_reg + intr_status_addresses[i]);
+ for (i = 0; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
return PASS;
}
@@ -254,7 +228,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
uint16_t acc_clks;
uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
en_lo = CEIL_DIV(Trp[mode], CLK_X);
@@ -291,7 +265,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
acc_clks++;
if ((data_invalid - acc_clks * CLK_X) < 2)
- dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n",
+ dev_warn(denali->dev, "%s, Line %d: Warning!\n",
__FILE__, __LINE__);
addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
@@ -419,7 +393,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
#endif
break;
default:
- dev_warn(&denali->dev->dev,
+ dev_warn(denali->dev,
"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
"Will use default parameter values instead.\n",
device_id);
@@ -431,17 +405,17 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
*/
static void find_valid_banks(struct denali_nand_info *denali)
{
- uint32_t id[LLD_MAX_FLASH_BANKS];
+ uint32_t id[denali->max_banks];
int i;
denali->total_used_banks = 1;
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
+ for (i = 0; i < denali->max_banks; i++) {
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
index_addr_read_data(denali,
(uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"Return 1st ID for bank[%d]: %x\n", i, id[i]);
if (i == 0) {
@@ -461,16 +435,27 @@ static void find_valid_banks(struct denali_nand_info *denali)
* Multichip support is not enabled.
*/
if (denali->total_used_banks != 1) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"Sorry, Intel CE4100 only supports "
"a single NAND device.\n");
BUG();
}
}
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"denali->total_used_banks: %d\n", denali->total_used_banks);
}
+/*
+ * Use the configuration feature register to determine the maximum number of
+ * banks that the hardware supports.
+ */
+static void detect_max_banks(struct denali_nand_info *denali)
+{
+ uint32_t features = ioread32(denali->flash_reg + FEATURES);
+
+ denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+}
+
static void detect_partition_feature(struct denali_nand_info *denali)
{
/* For MRST platform, denali->fwblks represent the
@@ -480,15 +465,15 @@ static void detect_partition_feature(struct denali_nand_info *denali)
* blocks it can't touch.
* */
if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
- PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
+ PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
denali->fwblks =
- ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MIN_VALUE) *
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
+ MIN_MAX_BANK__MIN_VALUE) *
denali->blksperchip)
+
- (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
- MIN_BLK_ADDR_1__VALUE);
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
+ MIN_BLK_ADDR__VALUE);
} else
denali->fwblks = SPECTRA_START_BLOCK;
} else
@@ -501,7 +486,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
uint32_t id_bytes[5], addr;
uint8_t i, maf_id, device_id;
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
@@ -530,7 +515,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
get_hynix_nand_para(denali, device_id);
}
- dev_info(&denali->dev->dev,
+ dev_info(denali->dev,
"Dump timing register values:"
"acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
"we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
@@ -560,7 +545,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
static void denali_set_intr_modes(struct denali_nand_info *denali,
uint16_t INT_ENABLE)
{
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
if (INT_ENABLE)
@@ -580,6 +565,7 @@ static inline bool is_flash_bank_valid(int flash_bank)
static void denali_irq_init(struct denali_nand_info *denali)
{
uint32_t int_mask = 0;
+ int i;
/* Disable global interrupts */
denali_set_intr_modes(denali, false);
@@ -587,10 +573,8 @@ static void denali_irq_init(struct denali_nand_info *denali)
int_mask = DENALI_IRQ_ALL;
/* Clear all status bits */
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
denali_irq_enable(denali, int_mask);
}
@@ -604,10 +588,10 @@ static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
static void denali_irq_enable(struct denali_nand_info *denali,
uint32_t int_mask)
{
- iowrite32(int_mask, denali->flash_reg + INTR_EN0);
- iowrite32(int_mask, denali->flash_reg + INTR_EN1);
- iowrite32(int_mask, denali->flash_reg + INTR_EN2);
- iowrite32(int_mask, denali->flash_reg + INTR_EN3);
+ int i;
+
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
}
/* This function only returns when an interrupt that this driver cares about
@@ -624,7 +608,7 @@ static inline void clear_interrupt(struct denali_nand_info *denali,
{
uint32_t intr_status_reg = 0;
- intr_status_reg = intr_status_addresses[denali->flash_bank];
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
}
@@ -645,7 +629,7 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali)
{
uint32_t intr_status_reg = 0;
- intr_status_reg = intr_status_addresses[denali->flash_bank];
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
return ioread32(denali->flash_reg + intr_status_reg);
}
@@ -754,7 +738,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
irq_mask = 0;
if (op == DENALI_READ)
- irq_mask = INTR_STATUS0__LOAD_COMP;
+ irq_mask = INTR_STATUS__LOAD_COMP;
else if (op == DENALI_WRITE)
irq_mask = 0;
else
@@ -800,7 +784,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"cmd, page, addr on timeout "
"(0x%x, 0x%x, 0x%x)\n",
cmd, denali->page, addr);
@@ -861,8 +845,8 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL;
+ uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
int status = 0;
denali->page = page;
@@ -875,11 +859,11 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev, "OOB write failed\n");
+ dev_err(denali->dev, "OOB write failed\n");
status = -EIO;
}
} else {
- dev_err(&denali->dev->dev, "unable to send pipeline command\n");
+ dev_err(denali->dev, "unable to send pipeline command\n");
status = -EIO;
}
return status;
@@ -889,7 +873,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint32_t irq_mask = INTR_STATUS0__LOAD_COMP,
+ uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
irq_status = 0, addr = 0x0, cmd = 0x0;
denali->page = page;
@@ -904,7 +888,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0)
- dev_err(&denali->dev->dev, "page on OOB timeout %d\n",
+ dev_err(denali->dev, "page on OOB timeout %d\n",
denali->page);
/* We set the device back to MAIN_ACCESS here as I observed
@@ -944,7 +928,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
{
bool check_erased_page = false;
- if (irq_status & INTR_STATUS0__ECC_ERR) {
+ if (irq_status & INTR_STATUS__ECC_ERR) {
/* read the ECC errors. we'll ignore them for now */
uint32_t err_address = 0, err_correction_info = 0;
uint32_t err_byte = 0, err_sector = 0, err_device = 0;
@@ -995,7 +979,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
* for a while for this interrupt
* */
while (!(read_interrupt_status(denali) &
- INTR_STATUS0__ECC_TRANSACTION_DONE))
+ INTR_STATUS__ECC_TRANSACTION_DONE))
cpu_relax();
clear_interrupts(denali);
denali_set_intr_modes(denali, true);
@@ -1045,14 +1029,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, bool raw_xfer)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__PROGRAM_FAIL;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
/* if it is a raw xfer, we want to disable ecc, and send
* the spare area.
@@ -1071,7 +1054,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize);
}
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
clear_interrupts(denali);
denali_enable_dma(denali, true);
@@ -1082,16 +1065,16 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"timeout on write_page (type = %d)\n",
raw_xfer);
denali->status =
- (irq_status & INTR_STATUS0__PROGRAM_FAIL) ?
+ (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
NAND_STATUS_FAIL : PASS;
}
denali_enable_dma(denali, false);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
}
/* NAND core entry points */
@@ -1139,18 +1122,17 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR;
+ uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
+ INTR_STATUS__ECC_ERR;
bool check_erased_page = false;
if (page != denali->page) {
- dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
@@ -1159,7 +1141,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
setup_ecc_for_xfer(denali, true, false);
denali_enable_dma(denali, true);
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
@@ -1167,7 +1149,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
memcpy(buf, denali->buf.buf, mtd->writesize);
@@ -1192,16 +1174,15 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
if (page != denali->page) {
- dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
@@ -1210,7 +1191,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
setup_ecc_for_xfer(denali, false, true);
denali_enable_dma(denali, true);
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
@@ -1218,7 +1199,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
denali_enable_dma(denali, false);
@@ -1271,10 +1252,10 @@ static void denali_erase(struct mtd_info *mtd, int page)
index_addr(denali, (uint32_t)cmd, 0x1);
/* wait for erase to complete or failure to occur */
- irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
- INTR_STATUS0__ERASE_FAIL);
+ irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
+ INTR_STATUS__ERASE_FAIL);
- denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ?
+ denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
NAND_STATUS_FAIL : PASS;
}
@@ -1330,7 +1311,7 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
uint8_t *ecc_code)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_calculate called unexpectedly\n");
BUG();
return -EIO;
@@ -1340,7 +1321,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_correct called unexpectedly\n");
BUG();
return -EIO;
@@ -1349,7 +1330,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_hwctl called unexpectedly\n");
BUG();
}
@@ -1375,6 +1356,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
/* Should set value for these registers when init */
iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, denali->flash_reg + ECC_ENABLE);
+ detect_max_banks(denali);
denali_nand_timing_set(denali);
denali_irq_init(denali);
}
@@ -1484,24 +1466,22 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
/* Is 32-bit DMA supported? */
- ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
-
+ ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (ret) {
printk(KERN_ERR "Spectra: no usable DMA configuration\n");
goto failed_enable_dev;
}
- denali->buf.dma_buf =
- pci_map_single(dev, denali->buf.buf,
- DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
+ DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
+ if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
goto failed_enable_dev;
}
pci_set_master(dev);
- denali->dev = dev;
+ denali->dev = &dev->dev;
denali->mtd.dev.parent = &dev->dev;
ret = pci_request_regions(dev, DENALI_NAND_NAME);
@@ -1554,7 +1534,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* scan for NAND devices attached to the controller
* this is the first stage in a two step process to register
* with the nand subsystem */
- if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
+ if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
ret = -ENXIO;
goto failed_req_irq;
}
@@ -1664,7 +1644,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_req_irq;
}
- ret = add_mtd_device(&denali->mtd);
+ ret = mtd_device_register(&denali->mtd, NULL, 0);
if (ret) {
dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
ret);
@@ -1681,8 +1661,8 @@ failed_remap_reg:
failed_req_regions:
pci_release_regions(dev);
failed_dma_map:
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
failed_enable_dev:
pci_disable_device(dev);
failed_alloc_memery:
@@ -1696,7 +1676,7 @@ static void denali_pci_remove(struct pci_dev *dev)
struct denali_nand_info *denali = pci_get_drvdata(dev);
nand_release(&denali->mtd);
- del_mtd_device(&denali->mtd);
+ mtd_device_unregister(&denali->mtd);
denali_irq_cleanup(dev->irq, denali);
@@ -1704,8 +1684,8 @@ static void denali_pci_remove(struct pci_dev *dev)
iounmap(denali->flash_mem);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
pci_set_drvdata(dev, NULL);
kfree(denali);
}
@@ -1721,8 +1701,7 @@ static struct pci_driver denali_pci_driver = {
static int __devinit denali_init(void)
{
- printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n",
- __DATE__, __TIME__);
+ printk(KERN_INFO "Spectra MTD driver\n");
return pci_register_driver(&denali_pci_driver);
}
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 3918bcb1561e..fabb9d56b39e 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -211,185 +211,46 @@
#define TRANSFER_MODE 0x400
#define TRANSFER_MODE__VALUE 0x0003
-#define INTR_STATUS0 0x410
-#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS0__ECC_ERR 0x0002
-#define INTR_STATUS0__DMA_CMD_COMP 0x0004
-#define INTR_STATUS0__TIME_OUT 0x0008
-#define INTR_STATUS0__PROGRAM_FAIL 0x0010
-#define INTR_STATUS0__ERASE_FAIL 0x0020
-#define INTR_STATUS0__LOAD_COMP 0x0040
-#define INTR_STATUS0__PROGRAM_COMP 0x0080
-#define INTR_STATUS0__ERASE_COMP 0x0100
-#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS0__LOCKED_BLK 0x0400
-#define INTR_STATUS0__UNSUP_CMD 0x0800
-#define INTR_STATUS0__INT_ACT 0x1000
-#define INTR_STATUS0__RST_COMP 0x2000
-#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS0__PAGE_XFER_INC 0x8000
-
-#define INTR_EN0 0x420
-#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN0__ECC_ERR 0x0002
-#define INTR_EN0__DMA_CMD_COMP 0x0004
-#define INTR_EN0__TIME_OUT 0x0008
-#define INTR_EN0__PROGRAM_FAIL 0x0010
-#define INTR_EN0__ERASE_FAIL 0x0020
-#define INTR_EN0__LOAD_COMP 0x0040
-#define INTR_EN0__PROGRAM_COMP 0x0080
-#define INTR_EN0__ERASE_COMP 0x0100
-#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN0__LOCKED_BLK 0x0400
-#define INTR_EN0__UNSUP_CMD 0x0800
-#define INTR_EN0__INT_ACT 0x1000
-#define INTR_EN0__RST_COMP 0x2000
-#define INTR_EN0__PIPE_CMD_ERR 0x4000
-#define INTR_EN0__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT0 0x430
-#define PAGE_CNT0__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR0 0x440
-#define ERR_PAGE_ADDR0__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR0 0x450
-#define ERR_BLOCK_ADDR0__VALUE 0xffff
-
-#define INTR_STATUS1 0x460
-#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS1__ECC_ERR 0x0002
-#define INTR_STATUS1__DMA_CMD_COMP 0x0004
-#define INTR_STATUS1__TIME_OUT 0x0008
-#define INTR_STATUS1__PROGRAM_FAIL 0x0010
-#define INTR_STATUS1__ERASE_FAIL 0x0020
-#define INTR_STATUS1__LOAD_COMP 0x0040
-#define INTR_STATUS1__PROGRAM_COMP 0x0080
-#define INTR_STATUS1__ERASE_COMP 0x0100
-#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS1__LOCKED_BLK 0x0400
-#define INTR_STATUS1__UNSUP_CMD 0x0800
-#define INTR_STATUS1__INT_ACT 0x1000
-#define INTR_STATUS1__RST_COMP 0x2000
-#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS1__PAGE_XFER_INC 0x8000
-
-#define INTR_EN1 0x470
-#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN1__ECC_ERR 0x0002
-#define INTR_EN1__DMA_CMD_COMP 0x0004
-#define INTR_EN1__TIME_OUT 0x0008
-#define INTR_EN1__PROGRAM_FAIL 0x0010
-#define INTR_EN1__ERASE_FAIL 0x0020
-#define INTR_EN1__LOAD_COMP 0x0040
-#define INTR_EN1__PROGRAM_COMP 0x0080
-#define INTR_EN1__ERASE_COMP 0x0100
-#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN1__LOCKED_BLK 0x0400
-#define INTR_EN1__UNSUP_CMD 0x0800
-#define INTR_EN1__INT_ACT 0x1000
-#define INTR_EN1__RST_COMP 0x2000
-#define INTR_EN1__PIPE_CMD_ERR 0x4000
-#define INTR_EN1__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT1 0x480
-#define PAGE_CNT1__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR1 0x490
-#define ERR_PAGE_ADDR1__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR1 0x4a0
-#define ERR_BLOCK_ADDR1__VALUE 0xffff
-
-#define INTR_STATUS2 0x4b0
-#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS2__ECC_ERR 0x0002
-#define INTR_STATUS2__DMA_CMD_COMP 0x0004
-#define INTR_STATUS2__TIME_OUT 0x0008
-#define INTR_STATUS2__PROGRAM_FAIL 0x0010
-#define INTR_STATUS2__ERASE_FAIL 0x0020
-#define INTR_STATUS2__LOAD_COMP 0x0040
-#define INTR_STATUS2__PROGRAM_COMP 0x0080
-#define INTR_STATUS2__ERASE_COMP 0x0100
-#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS2__LOCKED_BLK 0x0400
-#define INTR_STATUS2__UNSUP_CMD 0x0800
-#define INTR_STATUS2__INT_ACT 0x1000
-#define INTR_STATUS2__RST_COMP 0x2000
-#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS2__PAGE_XFER_INC 0x8000
-
-#define INTR_EN2 0x4c0
-#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN2__ECC_ERR 0x0002
-#define INTR_EN2__DMA_CMD_COMP 0x0004
-#define INTR_EN2__TIME_OUT 0x0008
-#define INTR_EN2__PROGRAM_FAIL 0x0010
-#define INTR_EN2__ERASE_FAIL 0x0020
-#define INTR_EN2__LOAD_COMP 0x0040
-#define INTR_EN2__PROGRAM_COMP 0x0080
-#define INTR_EN2__ERASE_COMP 0x0100
-#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN2__LOCKED_BLK 0x0400
-#define INTR_EN2__UNSUP_CMD 0x0800
-#define INTR_EN2__INT_ACT 0x1000
-#define INTR_EN2__RST_COMP 0x2000
-#define INTR_EN2__PIPE_CMD_ERR 0x4000
-#define INTR_EN2__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT2 0x4d0
-#define PAGE_CNT2__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR2 0x4e0
-#define ERR_PAGE_ADDR2__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR2 0x4f0
-#define ERR_BLOCK_ADDR2__VALUE 0xffff
-
-#define INTR_STATUS3 0x500
-#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS3__ECC_ERR 0x0002
-#define INTR_STATUS3__DMA_CMD_COMP 0x0004
-#define INTR_STATUS3__TIME_OUT 0x0008
-#define INTR_STATUS3__PROGRAM_FAIL 0x0010
-#define INTR_STATUS3__ERASE_FAIL 0x0020
-#define INTR_STATUS3__LOAD_COMP 0x0040
-#define INTR_STATUS3__PROGRAM_COMP 0x0080
-#define INTR_STATUS3__ERASE_COMP 0x0100
-#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS3__LOCKED_BLK 0x0400
-#define INTR_STATUS3__UNSUP_CMD 0x0800
-#define INTR_STATUS3__INT_ACT 0x1000
-#define INTR_STATUS3__RST_COMP 0x2000
-#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS3__PAGE_XFER_INC 0x8000
-
-#define INTR_EN3 0x510
-#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN3__ECC_ERR 0x0002
-#define INTR_EN3__DMA_CMD_COMP 0x0004
-#define INTR_EN3__TIME_OUT 0x0008
-#define INTR_EN3__PROGRAM_FAIL 0x0010
-#define INTR_EN3__ERASE_FAIL 0x0020
-#define INTR_EN3__LOAD_COMP 0x0040
-#define INTR_EN3__PROGRAM_COMP 0x0080
-#define INTR_EN3__ERASE_COMP 0x0100
-#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN3__LOCKED_BLK 0x0400
-#define INTR_EN3__UNSUP_CMD 0x0800
-#define INTR_EN3__INT_ACT 0x1000
-#define INTR_EN3__RST_COMP 0x2000
-#define INTR_EN3__PIPE_CMD_ERR 0x4000
-#define INTR_EN3__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT3 0x520
-#define PAGE_CNT3__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR3 0x530
-#define ERR_PAGE_ADDR3__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR3 0x540
-#define ERR_BLOCK_ADDR3__VALUE 0xffff
+#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
+#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
+
+#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS__ECC_ERR 0x0002
+#define INTR_STATUS__DMA_CMD_COMP 0x0004
+#define INTR_STATUS__TIME_OUT 0x0008
+#define INTR_STATUS__PROGRAM_FAIL 0x0010
+#define INTR_STATUS__ERASE_FAIL 0x0020
+#define INTR_STATUS__LOAD_COMP 0x0040
+#define INTR_STATUS__PROGRAM_COMP 0x0080
+#define INTR_STATUS__ERASE_COMP 0x0100
+#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS__LOCKED_BLK 0x0400
+#define INTR_STATUS__UNSUP_CMD 0x0800
+#define INTR_STATUS__INT_ACT 0x1000
+#define INTR_STATUS__RST_COMP 0x2000
+#define INTR_STATUS__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS__PAGE_XFER_INC 0x8000
+
+#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN__ECC_ERR 0x0002
+#define INTR_EN__DMA_CMD_COMP 0x0004
+#define INTR_EN__TIME_OUT 0x0008
+#define INTR_EN__PROGRAM_FAIL 0x0010
+#define INTR_EN__ERASE_FAIL 0x0020
+#define INTR_EN__LOAD_COMP 0x0040
+#define INTR_EN__PROGRAM_COMP 0x0080
+#define INTR_EN__ERASE_COMP 0x0100
+#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN__LOCKED_BLK 0x0400
+#define INTR_EN__UNSUP_CMD 0x0800
+#define INTR_EN__INT_ACT 0x1000
+#define INTR_EN__RST_COMP 0x2000
+#define INTR_EN__PIPE_CMD_ERR 0x4000
+#define INTR_EN__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
+#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
+#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
#define DATA_INTR 0x550
#define DATA_INTR__WRITE_SPACE_AV 0x0001
@@ -484,141 +345,23 @@
#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
-#define PERM_SRC_ID_0 0x830
-#define PERM_SRC_ID_0__SRCID 0x00ff
-#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_0 0x840
-#define MIN_BLK_ADDR_0__VALUE 0xffff
-
-#define MAX_BLK_ADDR_0 0x850
-#define MAX_BLK_ADDR_0__VALUE 0xffff
-
-#define MIN_MAX_BANK_0 0x860
-#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_1 0x870
-#define PERM_SRC_ID_1__SRCID 0x00ff
-#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_1 0x880
-#define MIN_BLK_ADDR_1__VALUE 0xffff
-
-#define MAX_BLK_ADDR_1 0x890
-#define MAX_BLK_ADDR_1__VALUE 0xffff
-
-#define MIN_MAX_BANK_1 0x8a0
-#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_2 0x8b0
-#define PERM_SRC_ID_2__SRCID 0x00ff
-#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_2 0x8c0
-#define MIN_BLK_ADDR_2__VALUE 0xffff
-
-#define MAX_BLK_ADDR_2 0x8d0
-#define MAX_BLK_ADDR_2__VALUE 0xffff
-
-#define MIN_MAX_BANK_2 0x8e0
-#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_3 0x8f0
-#define PERM_SRC_ID_3__SRCID 0x00ff
-#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_3 0x900
-#define MIN_BLK_ADDR_3__VALUE 0xffff
-
-#define MAX_BLK_ADDR_3 0x910
-#define MAX_BLK_ADDR_3__VALUE 0xffff
-
-#define MIN_MAX_BANK_3 0x920
-#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_4 0x930
-#define PERM_SRC_ID_4__SRCID 0x00ff
-#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_4 0x940
-#define MIN_BLK_ADDR_4__VALUE 0xffff
-
-#define MAX_BLK_ADDR_4 0x950
-#define MAX_BLK_ADDR_4__VALUE 0xffff
-
-#define MIN_MAX_BANK_4 0x960
-#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_5 0x970
-#define PERM_SRC_ID_5__SRCID 0x00ff
-#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_5 0x980
-#define MIN_BLK_ADDR_5__VALUE 0xffff
-
-#define MAX_BLK_ADDR_5 0x990
-#define MAX_BLK_ADDR_5__VALUE 0xffff
-
-#define MIN_MAX_BANK_5 0x9a0
-#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_6 0x9b0
-#define PERM_SRC_ID_6__SRCID 0x00ff
-#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_6 0x9c0
-#define MIN_BLK_ADDR_6__VALUE 0xffff
-
-#define MAX_BLK_ADDR_6 0x9d0
-#define MAX_BLK_ADDR_6__VALUE 0xffff
-
-#define MIN_MAX_BANK_6 0x9e0
-#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_7 0x9f0
-#define PERM_SRC_ID_7__SRCID 0x00ff
-#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
+#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
+#define PERM_SRC_ID__SRCID 0x00ff
+#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID__READ_ACTIVE 0x4000
+#define PERM_SRC_ID__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
+#define MIN_BLK_ADDR__VALUE 0xffff
+
+#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
+#define MAX_BLK_ADDR__VALUE 0xffff
+
+#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
+#define MIN_MAX_BANK__MIN_VALUE 0x0003
+#define MIN_MAX_BANK__MAX_VALUE 0x000c
-#define MIN_BLK_ADDR_7 0xa00
-#define MIN_BLK_ADDR_7__VALUE 0xffff
-
-#define MAX_BLK_ADDR_7 0xa10
-#define MAX_BLK_ADDR_7__VALUE 0xffff
-
-#define MIN_MAX_BANK_7 0xa20
-#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
/* ffsdefs.h */
#define CLEAR 0 /*use this to clear a field instead of "fail"*/
@@ -711,7 +454,6 @@
#define READ_WRITE_ENABLE_HIGH_COUNT 22
#define ECC_SECTOR_SIZE 512
-#define LLD_MAX_FLASH_BANKS 4
#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
@@ -732,7 +474,7 @@ struct denali_nand_info {
int status;
int platform;
struct nand_buf buf;
- struct pci_dev *dev;
+ struct device *dev;
int total_used_banks;
uint32_t block; /* stored for future use */
uint16_t page;
@@ -751,6 +493,7 @@ struct denali_nand_info {
uint32_t totalblks;
uint32_t blksperchip;
uint32_t bbtskipbytes;
+ uint32_t max_banks;
};
#endif /*_LLD_NAND_*/
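[Editor's note, not part of the patch: the per-bank register blocks removed above collapse into the parameterized INTR_STATUS(__bank)/INTR_EN(__bank) macros; for example INTR_STATUS(1) = 0x410 + 0x50 = 0x460, the old INTR_STATUS1. A hedged sketch of the loop such macros enable, with a hypothetical helper name:]

#include <linux/io.h>

/* Illustrative only: one loop replaces four near-identical register blocks. */
static void example_clear_all_bank_irqs(struct denali_nand_info *denali)
{
	int bank;

	for (bank = 0; bank < denali->max_banks; bank++) {
		uint32_t status = ioread32(denali->flash_reg + INTR_STATUS(bank));

		/* write the status bits back to acknowledge them, then mask the bank */
		iowrite32(status, denali->flash_reg + INTR_STATUS(bank));
		iowrite32(0, denali->flash_reg + INTR_EN(bank));
	}
}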
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 657b9f4b6f9b..7837728d02ff 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1360,11 +1360,9 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
At least as nand_bbt.c is currently written. */
if ((ret = nand_scan_bbt(mtd, NULL)))
return ret;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_register(mtd, NULL, 0);
if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
+ mtd_device_register(mtd, parts, numparts);
return 0;
}
@@ -1419,11 +1417,9 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
autopartitioning, but I want to give it more thought. */
if (!numparts)
return -EIO;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_register(mtd, NULL, 0);
if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
+ mtd_device_register(mtd, parts, numparts);
return 0;
}
@@ -1678,9 +1674,9 @@ static int __init doc_probe(unsigned long physadr)
/* DBB note: i believe nand_release is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
- /* nand_release will call del_mtd_device, but we haven't yet
- added it. This is handled without incident by
- del_mtd_device, as far as I can tell. */
+ /* nand_release will call mtd_device_unregister, but we
+ haven't yet added it. This is handled without incident by
+ mtd_device_unregister, as far as I can tell. */
nand_release(mtd);
kfree(mtd);
goto fail;
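[Editor's note, not part of the patch: the diskonchip hunks above, and most of the driver hunks that follow, replace the add_mtd_device()/add_mtd_partitions() pair and its CONFIG_MTD_PARTITIONS #ifdefs with a single mtd_device_register() call. A minimal sketch of the new pattern, helper name hypothetical:]

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Illustrative only: one call covers both the partitioned and whole-device cases. */
static int example_register(struct mtd_info *mtd,
			    struct mtd_partition *parts, int nr_parts)
{
	/* with nr_parts == 0 (or parts == NULL) the whole device is registered */
	return mtd_device_register(mtd, parts, nr_parts);
}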
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 86366bfba9f8..8400d0f6dada 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -55,7 +55,6 @@ static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash device
*/
@@ -67,8 +66,6 @@ static struct mtd_partition partition_info[] = {
#define NUM_PARTITIONS 1
-#endif
-
/*
* hardware specific access to control-lines
*
@@ -101,9 +98,7 @@ static int ep7312_device_ready(struct mtd_info *mtd)
return 1;
}
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -162,14 +157,12 @@ static int __init ep7312_init(void)
kfree(ep7312_mtd);
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
ep7312_mtd->name = "edb7312-nand";
mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
if (mtd_parts_nb == 0) {
mtd_parts = partition_info;
mtd_parts_nb = NUM_PARTITIONS;
@@ -178,7 +171,7 @@ static int __init ep7312_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ep7312_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 537e380b8dcb..0bb254c7d2b1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -841,12 +841,9 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
struct fsl_elbc_mtd *priv;
struct resource res;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
-
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", NULL };
struct mtd_partition *parts;
-#endif
int ret;
int bank;
struct device *dev;
@@ -935,26 +932,19 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
if (ret < 0)
goto err;
-#ifdef CONFIG_MTD_OF_PARTS
if (ret == 0) {
ret = of_mtd_parse_partitions(priv->dev, node, &parts);
if (ret < 0)
goto err;
}
-#endif
- if (ret > 0)
- add_mtd_partitions(&priv->mtd, parts, ret);
- else
-#endif
- add_mtd_device(&priv->mtd);
+ mtd_device_register(&priv->mtd, parts, ret);
printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 073ee026a17c..23752fd5bc59 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -33,10 +33,7 @@ struct fsl_upm_nand {
struct mtd_info mtd;
struct nand_chip chip;
int last_ctrl;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
-
struct fsl_upm upm;
uint8_t upm_addr_offset;
uint8_t upm_cmd_offset;
@@ -161,9 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
{
int ret;
struct device_node *flash_np;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_types[] = { "cmdlinepart", NULL, };
-#endif
fun->chip.IO_ADDR_R = fun->io_base;
fun->chip.IO_ADDR_W = fun->io_base;
@@ -197,7 +192,6 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
@@ -207,11 +201,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
goto err;
}
#endif
- if (ret > 0)
- ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
- else
-#endif
- ret = add_mtd_device(&fun->mtd);
+ ret = mtd_device_register(&fun->mtd, fun->parts, ret);
err:
of_node_put(flash_np);
return ret;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 0d45ef3883e8..e9b275ac381c 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -120,8 +120,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
}
};
-
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Default partition tables to be used if the partition information not
* provided through platform data.
@@ -182,7 +180,6 @@ static struct mtd_partition partition_info_128KB_blk[] = {
#ifdef CONFIG_MTD_CMDLINE_PARTS
const char *part_probes[] = { "cmdlinepart", NULL };
#endif
-#endif
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
@@ -719,7 +716,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* platform data,
* default partition information present in driver.
*/
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
/*
* Check if partition info passed via command line
@@ -777,19 +773,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
#endif
- if (host->partitions) {
- ret = add_mtd_partitions(&host->mtd, host->partitions,
- host->nr_partitions);
- if (ret)
- goto err_probe;
- }
-#else
- dev_info(&pdev->dev, "Registering %s as whole device\n", mtd->name);
- if (!add_mtd_device(mtd)) {
- ret = -ENXIO;
+ ret = mtd_device_register(&host->mtd, host->partitions,
+ host->nr_partitions);
+ if (ret)
goto err_probe;
- }
-#endif
platform_set_drvdata(pdev, host);
dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
@@ -835,11 +822,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (host) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(&host->mtd);
-#else
- del_mtd_device(&host->mtd);
-#endif
+ mtd_device_unregister(&host->mtd);
clk_disable(host->clk);
clk_put(host->clk);
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 0cde618bcc1e..2c2060b2800e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -316,8 +316,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
+ mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
platform_set_drvdata(dev, gpiomtd);
return 0;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index f8ce79b446ed..02a03e67109c 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -38,7 +38,6 @@ static struct mtd_info *h1910_nand_mtd = NULL;
* Module stuff
*/
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash device
*/
@@ -50,8 +49,6 @@ static struct mtd_partition partition_info[] = {
#define NUM_PARTITIONS 1
-#endif
-
/*
* hardware specific access to control-lines
*
@@ -154,7 +151,7 @@ static int __init h1910_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index cea38a5d4ac5..6e813daed068 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -299,10 +299,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partition_info;
int num_partitions = 0;
-#endif
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
if (!nand) {
@@ -375,7 +373,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
goto err_gpio_free;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partition_info, 0);
@@ -384,12 +381,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
num_partitions = pdata->num_partitions;
partition_info = pdata->partitions;
}
-
- if (num_partitions > 0)
- ret = add_mtd_partitions(mtd, partition_info, num_partitions);
- else
-#endif
- ret = add_mtd_device(mtd);
+ ret = mtd_device_register(mtd, partition_info, num_partitions);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 0b81b5b499d1..2f7c930872f9 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,9 +131,7 @@ struct mpc5121_nfc_prv {
static void mpc5121_nfc_done(struct mtd_info *mtd);
-#ifdef CONFIG_MTD_PARTITIONS
static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
-#endif
/* Read NFC register */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
@@ -658,9 +656,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
struct mpc5121_nfc_prv *prv;
struct resource res;
struct mtd_info *mtd;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
const __be32 *chips_no;
@@ -841,7 +837,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
dev_set_drvdata(dev, mtd);
/* Register device in MTD */
-#ifdef CONFIG_MTD_PARTITIONS
retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
if (retval == 0)
@@ -854,12 +849,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
goto error;
}
- if (retval > 0)
- retval = add_mtd_partitions(mtd, parts, retval);
- else
-#endif
- retval = add_mtd_device(mtd);
-
+ retval = mtd_device_register(mtd, parts, retval);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 42a95fb41504..90df34c4d26c 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -56,8 +56,14 @@
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
-#define NFC_V21_UNLOCKSTART_BLKADDR (host->regs + 0x20)
-#define NFC_V21_UNLOCKEND_BLKADDR (host->regs + 0x22)
+#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
@@ -152,6 +158,7 @@ struct mxc_nand_host {
int clk_act;
int irq;
int eccsize;
+ int active_cs;
struct completion op_completion;
@@ -236,9 +243,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
@@ -445,7 +450,7 @@ static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
for (i = 0; i < bufs; i++) {
/* NANDFC buffer 0 is used for page read/write */
- writew(i, NFC_V1_V2_BUF_ADDR);
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
writew(ops, NFC_V1_V2_CONFIG2);
@@ -470,7 +475,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
struct nand_chip *this = &host->nand;
/* NANDFC buffer 0 is used for device ID output */
- writew(0x0, NFC_V1_V2_BUF_ADDR);
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
writew(NFC_ID, NFC_V1_V2_CONFIG2);
@@ -505,7 +510,7 @@ static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
uint32_t store;
uint16_t ret;
- writew(0x0, NFC_V1_V2_BUF_ADDR);
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
/*
* The device status is stored in main_area0. To
@@ -686,24 +691,24 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- switch (chip) {
- case -1:
+ if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
clk_disable(host->clk);
host->clk_act = 0;
}
- break;
- case 0:
+ return;
+ }
+
+ if (!host->clk_act) {
/* Enable the NFC clock */
- if (!host->clk_act) {
- clk_enable(host->clk);
- host->clk_act = 1;
- }
- break;
+ clk_enable(host->clk);
+ host->clk_act = 1;
+ }
- default:
- break;
+ if (nfc_is_v21()) {
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
}
}
@@ -834,8 +839,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
/* Blocks to be unlocked */
if (nfc_is_v21()) {
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
} else if (nfc_is_v1()) {
writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
@@ -1200,7 +1211,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
irq_control_v1_v2(host, 1);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
+ if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -1220,18 +1231,15 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
/* Register the partitions */
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts =
parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
if (nr_parts > 0)
- add_mtd_partitions(mtd, host->parts, nr_parts);
+ mtd_device_register(mtd, host->parts, nr_parts);
else if (pdata->parts)
- add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
- else
-#endif
- {
+ mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ else {
pr_info("Registering %s as whole device\n", mtd->name);
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
}
platform_set_drvdata(pdev, host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index c54a4cbac6bc..a46e9bb847bd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -47,10 +47,7 @@
#include <linux/bitops.h>
#include <linux/leds.h>
#include <linux/io.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
/* Define default oob placement schemes for large and small page devices */
static struct nand_ecclayout nand_oob_8 = {
@@ -976,9 +973,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0);
out:
- /* de-select the NAND device */
- chip->select_chip(mtd, -1);
-
nand_release_device(mtd);
return ret;
@@ -1046,9 +1040,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
- /* de-select the NAND device */
- chip->select_chip(mtd, -1);
-
nand_release_device(mtd);
return ret;
@@ -3112,6 +3103,8 @@ ident_done:
chip->chip_shift += 32 - 1;
}
+ chip->badblockbits = 8;
+
/* Set the bad block position */
if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
@@ -3539,12 +3532,7 @@ void nand_release(struct mtd_info *mtd)
if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
-#ifdef CONFIG_MTD_PARTITIONS
- /* Deregister partitions */
- del_mtd_partitions(mtd);
-#endif
- /* Deregister the device */
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
/* Free bad block table memory */
kfree(chip->bbt);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index af46428286fe..ccbeaa1e4a8e 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1276,20 +1276,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
* while scanning a device for factory marked good / bad blocks. */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static struct nand_bbt_descr smallpage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = NAND_SMALL_BADBLOCK_POS,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = NAND_LARGE_BADBLOCK_POS,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
static struct nand_bbt_descr agand_flashbased = {
@@ -1355,10 +1341,6 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
* this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
* passed to this function.
*
- * TODO: Handle other flags, replace other static structs
- * (e.g. handle NAND_BBT_FLASH for flash-based BBT,
- * replace smallpage_flashbased)
- *
*/
static int nand_create_default_bbt_descr(struct nand_chip *this)
{
@@ -1422,15 +1404,14 @@ int nand_default_bbt(struct mtd_info *mtd)
this->bbt_md = &bbt_mirror_descr;
}
}
- if (!this->badblock_pattern) {
- this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
- }
} else {
this->bbt_td = NULL;
this->bbt_md = NULL;
- if (!this->badblock_pattern)
- nand_create_default_bbt_descr(this);
}
+
+ if (!this->badblock_pattern)
+ nand_create_default_bbt_descr(this);
+
return nand_scan_bbt(mtd, this->badblock_pattern);
}
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 893d95bfea48..357e8c5252a8 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2383,7 +2383,9 @@ static int __init ns_init_module(void)
goto err_exit;
/* Register NAND partitions */
- if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0)
+ retval = mtd_device_register(nsmtd, &nand->partitions[0],
+ nand->nbparts);
+ if (retval != 0)
goto err_exit;
return 0;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index bbe6d451290d..ea2dea8a9c88 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -33,6 +33,7 @@
#include <linux/of_platform.h>
#include <asm/io.h>
+#define NDFC_MAX_CS 4
struct ndfc_controller {
struct platform_device *ofdev;
@@ -41,17 +42,16 @@ struct ndfc_controller {
struct nand_chip chip;
int chip_select;
struct nand_hw_control ndfc_control;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
-static struct ndfc_controller ndfc_ctrl;
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
static void ndfc_select_chip(struct mtd_info *mtd, int chip)
{
uint32_t ccr;
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *nchip = mtd->priv;
+ struct ndfc_controller *ndfc = nchip->priv;
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
if (chip >= 0) {
@@ -64,7 +64,8 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
if (cmd == NAND_CMD_NONE)
return;
@@ -77,7 +78,8 @@ static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
static int ndfc_ready(struct mtd_info *mtd)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
}
@@ -85,7 +87,8 @@ static int ndfc_ready(struct mtd_info *mtd)
static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
{
uint32_t ccr;
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
ccr |= NDFC_CCR_RESET_ECC;
@@ -96,7 +99,8 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
static int ndfc_calculate_ecc(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_code)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t ecc;
uint8_t *p = (uint8_t *)&ecc;
@@ -119,7 +123,8 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
*/
static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -128,7 +133,8 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -137,7 +143,8 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -152,13 +159,11 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
static int ndfc_chip_init(struct ndfc_controller *ndfc,
struct device_node *node)
{
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_types[] = { "cmdlinepart", NULL };
#else
static const char *part_types[] = { NULL };
#endif
-#endif
struct device_node *flash_np;
struct nand_chip *chip = &ndfc->chip;
int ret;
@@ -179,6 +184,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->priv = ndfc;
ndfc->mtd.priv = chip;
ndfc->mtd.owner = THIS_MODULE;
@@ -198,25 +204,18 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
if (ret < 0)
goto err;
-#ifdef CONFIG_MTD_OF_PARTS
if (ret == 0) {
ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
&ndfc->parts);
if (ret < 0)
goto err;
}
-#endif
- if (ret > 0)
- ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret);
- else
-#endif
- ret = add_mtd_device(&ndfc->mtd);
+ ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
err:
of_node_put(flash_np);
@@ -227,15 +226,10 @@ err:
static int __devinit ndfc_probe(struct platform_device *ofdev)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct ndfc_controller *ndfc;
const __be32 *reg;
u32 ccr;
- int err, len;
-
- spin_lock_init(&ndfc->ndfc_control.lock);
- init_waitqueue_head(&ndfc->ndfc_control.wq);
- ndfc->ofdev = ofdev;
- dev_set_drvdata(&ofdev->dev, ndfc);
+ int err, len, cs;
/* Read the reg property to get the chip select */
reg = of_get_property(ofdev->dev.of_node, "reg", &len);
@@ -243,7 +237,20 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
return -ENOENT;
}
- ndfc->chip_select = be32_to_cpu(reg[0]);
+
+ cs = be32_to_cpu(reg[0]);
+ if (cs >= NDFC_MAX_CS) {
+ dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ ndfc = &ndfc_ctrl[cs];
+ ndfc->chip_select = cs;
+
+ spin_lock_init(&ndfc->ndfc_control.lock);
+ init_waitqueue_head(&ndfc->ndfc_control.wq);
+ ndfc->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, ndfc);
ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
if (!ndfc->ndfcbase) {
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a045a4a581b6..b6a5c86ab31e 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -158,12 +158,7 @@ static int nomadik_nand_probe(struct platform_device *pdev)
goto err_unmap;
}
-#ifdef CONFIG_MTD_PARTITIONS
- add_mtd_partitions(&host->mtd, pdata->parts, pdata->nparts);
-#else
- pr_info("Registering %s as whole device\n", mtd->name);
- add_mtd_device(mtd);
-#endif
+ mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
platform_set_drvdata(pdev, host);
return 0;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 6eddf7361ed7..9c30a0b03171 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -321,8 +321,8 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
goto fail3;
}
- add_mtd_partitions(&(nuc900_nand->mtd), partitions,
- ARRAY_SIZE(partitions));
+ mtd_device_register(&(nuc900_nand->mtd), partitions,
+ ARRAY_SIZE(partitions));
platform_set_drvdata(pdev, nuc900_nand);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index da9a351c9d79..0db2c0e7656a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -94,9 +94,7 @@
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
@@ -263,11 +261,10 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len);
+ omap_read_buf16(mtd, (u_char *)p, len);
else
- omap_read_buf8(mtd, buf, len);
+ omap_read_buf8(mtd, (u_char *)p, len);
} else {
- p = (u32 *) buf;
do {
r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
r_count = r_count >> 2;
@@ -293,7 +290,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
struct omap_nand_info, mtd);
uint32_t w_count = 0;
int i = 0, ret = 0;
- u16 *p;
+ u16 *p = (u16 *)buf;
unsigned long tim, limit;
/* take care of subpage writes */
@@ -309,11 +306,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, buf, len);
+ omap_write_buf16(mtd, (u_char *)p, len);
else
- omap_write_buf8(mtd, buf, len);
+ omap_write_buf8(mtd, (u_char *)p, len);
} else {
- p = (u16 *) buf;
while (len) {
w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
w_count = w_count >> 1;
@@ -1073,9 +1069,9 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
/* DIP switches on some boards change between 8 and 16 bit
* bus widths for flash. Try the other width if the first try fails.
*/
- if (nand_scan(&info->mtd, 1)) {
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
info->nand.options ^= NAND_BUSWIDTH_16;
- if (nand_scan(&info->mtd, 1)) {
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
err = -ENXIO;
goto out_release_mem_region;
}
@@ -1101,15 +1097,19 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.ecc.layout = &omap_oobinfo;
}
-#ifdef CONFIG_MTD_PARTITIONS
+ /* second phase scan */
+ if (nand_scan_tail(&info->mtd)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
- add_mtd_partitions(&info->mtd, info->parts, err);
+ mtd_device_register(&info->mtd, info->parts, err);
else if (pdata->parts)
- add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- add_mtd_device(&info->mtd);
+ mtd_device_register(&info->mtd, NULL, 0);
platform_set_drvdata(pdev, &info->mtd);
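[Editor's note, not part of the patch: the omap2.c hunks above split nand_scan() into nand_scan_ident() and nand_scan_tail() so the driver can choose its ECC layout after the chip geometry is known but before the MTD is finalized. A hedged sketch of the resulting probe flow, with error handling and the driver-specific layout elided:]

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Illustrative only: two-phase scan with ECC configuration in between. */
static int example_two_phase_scan(struct mtd_info *mtd, struct nand_chip *chip)
{
	if (nand_scan_ident(mtd, 1, NULL))	/* phase 1: identify chip, read geometry */
		return -ENXIO;

	chip->ecc.mode = NAND_ECC_HW;		/* pick ECC based on what was detected */
	/* chip->ecc.layout = ...;		   assumption: driver-specific layout set here */

	if (nand_scan_tail(mtd))		/* phase 2: finish init with the chosen ECC */
		return -ENXIO;

	return mtd_device_register(mtd, NULL, 0);
}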
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index da6e75343052..7794d0680f91 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,9 +21,7 @@
#include <mach/hardware.h>
#include <plat/orion_nand.h>
-#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
@@ -83,10 +81,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *io_base;
int ret = 0;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_part = 0;
-#endif
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
@@ -136,7 +132,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
goto no_dev;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "orion_nand";
num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
@@ -147,14 +142,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
partitions = board->parts;
}
- if (partitions && num_part > 0)
- ret = add_mtd_partitions(mtd, partitions, num_part);
- else
- ret = add_mtd_device(mtd);
-#else
- ret = add_mtd_device(mtd);
-#endif
-
+ ret = mtd_device_register(mtd, partitions, num_part);
if (ret) {
nand_release(mtd);
goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 20bfe5f15afd..b1aa41b8a4eb 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -163,7 +163,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
goto out_lpc;
}
- if (add_mtd_device(pasemi_nand_mtd)) {
+ if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
err = -ENODEV;
goto out_lpc;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index caf5a736340a..633c04bf76f6 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,10 +21,8 @@ struct plat_nand_data {
struct nand_chip chip;
struct mtd_info mtd;
void __iomem *io_base;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
/*
@@ -101,13 +99,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
goto out;
}
-#ifdef CONFIG_MTD_PARTITIONS
if (pdata->chip.part_probe_types) {
err = parse_mtd_partitions(&data->mtd,
pdata->chip.part_probe_types,
&data->parts, 0);
if (err > 0) {
- add_mtd_partitions(&data->mtd, data->parts, err);
+ mtd_device_register(&data->mtd, data->parts, err);
return 0;
}
}
@@ -115,11 +112,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
- err = add_mtd_partitions(&data->mtd, data->parts,
+ err = mtd_device_register(&data->mtd, data->parts,
pdata->chip.nr_partitions);
} else
-#endif
- err = add_mtd_device(&data->mtd);
+ err = mtd_device_register(&data->mtd, NULL, 0);
if (!err)
return err;
@@ -149,10 +145,8 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
-#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
-#endif
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index cc8658431851..3bbb796b451c 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -73,7 +73,6 @@ __setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
#endif
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash devices
*/
@@ -101,7 +100,6 @@ static struct mtd_partition partition_info_evb[] = {
#define NUM_PARTITIONS 1
extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
-#endif
/*
* hardware specific access to control-lines
@@ -189,10 +187,8 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
}
#endif
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
const char *part_probes_evb[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -284,14 +280,13 @@ static int __init ppchameleonevb_init(void)
this->chip_delay = NAND_SMALL_DELAY_US;
#endif
-#ifdef CONFIG_MTD_PARTITIONS
ppchameleon_mtd->name = "ppchameleon-nand";
mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
+
if (mtd_parts_nb == 0) {
if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
mtd_parts = partition_info_me;
@@ -303,7 +298,7 @@ static int __init ppchameleonevb_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
nand_evb_init:
/****************************
@@ -385,14 +380,14 @@ static int __init ppchameleonevb_init(void)
iounmap(ppchameleon_fio_base);
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
+
ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
+
if (mtd_parts_nb == 0) {
mtd_parts = partition_info_evb;
mtd_parts_nb = NUM_PARTITIONS;
@@ -401,7 +396,7 @@ static int __init ppchameleonevb_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index ff0701276d65..1fb3b3a80581 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1119,10 +1119,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
clk_put(info->clk);
if (mtd) {
- del_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mtd);
-#endif
+ mtd_device_unregister(mtd);
kfree(mtd);
}
return 0;
@@ -1149,7 +1146,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
-#ifdef CONFIG_MTD_PARTITIONS
if (mtd_has_cmdlinepart()) {
const char *probes[] = { "cmdlinepart", NULL };
struct mtd_partition *parts;
@@ -1158,13 +1154,10 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
if (nr_parts)
- return add_mtd_partitions(info->mtd, parts, nr_parts);
+ return mtd_device_register(info->mtd, parts, nr_parts);
}
- return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
-#else
- return 0;
-#endif
+ return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
}
#ifdef CONFIG_PM
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 67440b5beef8..c9f9127ff770 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -580,7 +580,8 @@ static int __init rtc_from4_init(void)
#endif
/* Register the partitions */
- ret = add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
+ ret = mtd_device_register(rtc_from4_mtd, partition_info,
+ NUM_PARTITIONS);
if (ret)
goto err_3;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 33d832dddfdd..4405468f196b 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -55,7 +55,7 @@ static int hardware_ecc = 0;
#endif
#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-static int clock_stop = 1;
+static const int clock_stop = 1;
#else
static const int clock_stop = 0;
#endif
@@ -96,6 +96,12 @@ enum s3c_cpu_type {
TYPE_S3C2440,
};
+enum s3c_nand_clk_state {
+ CLOCK_DISABLE = 0,
+ CLOCK_ENABLE,
+ CLOCK_SUSPEND,
+};
+
/* overview of the s3c2410 nand state */
/**
@@ -111,6 +117,7 @@ enum s3c_cpu_type {
* @mtd_count: The number of MTDs created from this controller.
* @save_sel: The contents of @sel_reg to be saved over suspend.
* @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
* @cpu_type: The exact type of this controller.
*/
struct s3c2410_nand_info {
@@ -129,6 +136,7 @@ struct s3c2410_nand_info {
int mtd_count;
unsigned long save_sel;
unsigned long clk_rate;
+ enum s3c_nand_clk_state clk_state;
enum s3c_cpu_type cpu_type;
@@ -159,11 +167,33 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
return dev->dev.platform_data;
}
-static inline int allow_clk_stop(struct s3c2410_nand_info *info)
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
{
return clock_stop;
}
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+ enum s3c_nand_clk_state new_state)
+{
+ if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+ return;
+
+ if (info->clk_state == CLOCK_ENABLE) {
+ if (new_state != CLOCK_ENABLE)
+ clk_disable(info->clk);
+ } else {
+ if (new_state == CLOCK_ENABLE)
+ clk_enable(info->clk);
+ }
+
+ info->clk_state = new_state;
+}
+
/* timing calculations */
#define NS_IN_KHZ 1000000
@@ -333,8 +363,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
nmtd = this->priv;
info = nmtd->info;
- if (chip != -1 && allow_clk_stop(info))
- clk_enable(info->clk);
+ if (chip != -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
cur = readl(info->sel_reg);
@@ -356,8 +386,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
writel(cur, info->sel_reg);
- if (chip == -1 && allow_clk_stop(info))
- clk_disable(info->clk);
+ if (chip == -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
/* s3c2410_nand_hwcontrol
@@ -694,8 +724,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
/* free the common resources */
if (info->clk != NULL && !IS_ERR(info->clk)) {
- if (!allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
clk_put(info->clk);
}
@@ -715,7 +744,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
@@ -725,7 +753,7 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
int nr_part = 0;
if (set == NULL)
- return add_mtd_device(&mtd->mtd);
+ return mtd_device_register(&mtd->mtd, NULL, 0);
mtd->mtd.name = set->name;
nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
@@ -735,19 +763,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
part_info = set->partitions;
}
- if (nr_part > 0 && part_info)
- return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
-
- return add_mtd_device(&mtd->mtd);
-}
-#else
-static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *mtd,
- struct s3c2410_nand_set *set)
-{
- return add_mtd_device(&mtd->mtd);
+ return mtd_device_register(&mtd->mtd, part_info, nr_part);
}
-#endif
/**
* s3c2410_nand_init_chip - initialise a single instance of an chip
@@ -947,7 +964,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
goto exit_error;
}
- clk_enable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
/* allocate and map the resource */
@@ -1026,9 +1043,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
goto exit_error;
}
- if (allow_clk_stop(info)) {
+ if (allow_clk_suspend(info)) {
dev_info(&pdev->dev, "clock idle support enabled\n");
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
pr_debug("initialised ok\n");
@@ -1059,8 +1076,7 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
writel(info->save_sel | info->sel_bit, info->sel_reg);
- if (!allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
}
return 0;
@@ -1072,7 +1088,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
unsigned long sel;
if (info) {
- clk_enable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
s3c2410_nand_inithw(info);
/* Restore the state of the nFCE line. */
@@ -1082,8 +1098,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
sel |= info->save_sel & info->sel_bit;
writel(sel, info->sel_reg);
- if (allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
return 0;
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 81bbb5ee148d..93b1f74321c2 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -867,7 +867,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
if (ret)
goto err;
- add_mtd_partitions(flctl_mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
return 0;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 54ec7542a7b7..19e24ed089ea 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,9 +103,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
return readb(sharpsl->io + ECCCNTR) != 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -113,10 +111,8 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *sharpsl_partition_info;
int nr_partitions;
-#endif
struct resource *r;
int err = 0;
struct sharpsl_nand *sharpsl;
@@ -188,18 +184,14 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* Register the partitions */
sharpsl->mtd.name = "sharpsl-nand";
-#ifdef CONFIG_MTD_PARTITIONS
nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
if (nr_partitions <= 0) {
nr_partitions = data->nr_partitions;
sharpsl_partition_info = data->partitions;
}
- if (nr_partitions > 0)
- err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
- else
-#endif
- err = add_mtd_device(&sharpsl->mtd);
+ err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
+ nr_partitions);
if (err)
goto err_add;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 57cc80cd01a3..b6332e83b289 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -139,7 +139,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
if (ret)
return ret;
- return add_mtd_device(mtd);
+ return mtd_device_register(mtd, NULL, 0);
}
EXPORT_SYMBOL_GPL(sm_register_device);
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a853548986f0..ca2d0555729e 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,9 +155,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
return 1;
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Probe for the NAND device.
@@ -168,11 +166,8 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int res;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_partitions = 0;
-#endif
/* Allocate memory for the device structure (and zero it) */
host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -230,7 +225,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto out;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partitions, 0);
@@ -240,7 +234,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
}
#endif
-#ifdef CONFIG_MTD_OF_PARTS
if (num_partitions == 0) {
num_partitions = of_mtd_parse_partitions(&ofdev->dev,
ofdev->dev.of_node,
@@ -250,19 +243,12 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto release;
}
}
-#endif
- if (partitions && (num_partitions > 0))
- res = add_mtd_partitions(mtd, partitions, num_partitions);
- else
-#endif
- res = add_mtd_device(mtd);
+ res = mtd_device_register(mtd, partitions, num_partitions);
if (!res)
return res;
-#ifdef CONFIG_MTD_PARTITIONS
release:
-#endif
nand_release(mtd);
out:
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 0cc6d0acb8fe..bef76cd7c24c 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -149,7 +149,7 @@ static int __init spia_init(void)
}
/* Register the partitions */
- add_mtd_partitions(spia_mtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index c004e474631b..11e8371b5683 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -381,10 +381,8 @@ static int tmio_probe(struct platform_device *dev)
struct tmio_nand *tmio;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
int nbparts = 0;
-#endif
int retval;
if (data == NULL)
@@ -463,7 +461,6 @@ static int tmio_probe(struct platform_device *dev)
goto err_scan;
}
/* Register the partitions */
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
#endif
@@ -472,12 +469,7 @@ static int tmio_probe(struct platform_device *dev)
nbparts = data->num_partitions;
}
- if (nbparts)
- retval = add_mtd_partitions(mtd, parts, nbparts);
- else
-#endif
- retval = add_mtd_device(mtd);
-
+ retval = mtd_device_register(mtd, parts, nbparts);
if (!retval)
return retval;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ca270a4881a4..bfba4e39a6c5 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,9 +74,7 @@ struct txx9ndfmc_drvdata {
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
struct nand_hw_control hw_control;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
-#endif
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -289,9 +287,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *probes[] = { "cmdlinepart", NULL };
-#endif
int hold, spw;
int i;
struct txx9ndfmc_drvdata *drvdata;
@@ -337,9 +333,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
struct txx9ndfmc_priv *txx9_priv;
struct nand_chip *chip;
struct mtd_info *mtd;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
-#endif
if (!(plat->ch_mask & (1 << i)))
continue;
@@ -399,13 +393,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
}
mtd->name = txx9_priv->mtdname;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mtd, probes,
&drvdata->parts[i], 0);
- if (nr_parts > 0)
- add_mtd_partitions(mtd, drvdata->parts[i], nr_parts);
-#endif
- add_mtd_device(mtd);
+ mtd_device_register(mtd, drvdata->parts[i], nr_parts);
drvdata->mtds[i] = mtd;
}
@@ -431,9 +421,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
txx9_priv = chip->priv;
nand_release(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
kfree(drvdata->parts[i]);
-#endif
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4f426195f8db..772ad2966619 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,7 +1,6 @@
menuconfig MTD_ONENAND
tristate "OneNAND Device Support"
depends on MTD
- select MTD_PARTITIONS
help
This enables support for accessing all type of OneNAND flash
devices. For further information see
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ac08750748a3..2d70d354d846 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,9 +30,7 @@
*/
#define DRIVER_NAME "onenand-flash"
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
struct onenand_info {
struct mtd_info mtd;
@@ -75,15 +73,13 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
- add_mtd_partitions(&info->mtd, info->parts, err);
+ mtd_device_register(&info->mtd, info->parts, err);
else if (err <= 0 && pdata && pdata->parts)
- add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- err = add_mtd_device(&info->mtd);
+ err = mtd_device_register(&info->mtd, NULL, 0);
platform_set_drvdata(pdev, info);
@@ -108,11 +104,7 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (info) {
- if (info->parts)
- del_mtd_partitions(&info->mtd);
- else
- del_mtd_device(&info->mtd);
-
+ mtd_device_unregister(&info->mtd);
onenand_release(&info->mtd);
release_mem_region(res->start, size);
iounmap(info->onenand.base);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 1fcb41adab07..a916dec29215 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -67,9 +67,7 @@ struct omap2_onenand {
struct regulator *regulator;
};
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
@@ -755,15 +753,13 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
-#ifdef CONFIG_MTD_PARTITIONS
r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
if (r > 0)
- r = add_mtd_partitions(&c->mtd, c->parts, r);
+ r = mtd_device_register(&c->mtd, c->parts, r);
else if (pdata->parts != NULL)
- r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
+ r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- r = add_mtd_device(&c->mtd);
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 56a8b2005bda..ac9e959802a7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -65,11 +65,11 @@ MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
" : 2 -> 1st Block lock"
" : 3 -> BOTH OTP Block and 1st Block lock");
-/**
- * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
- * For now, we expose only 64 out of 80 ecc bytes
+/*
+ * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
*/
-static struct nand_ecclayout onenand_oob_128 = {
+static struct nand_ecclayout flexonenand_oob_128 = {
.eccbytes = 64,
.eccpos = {
6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
@@ -86,6 +86,35 @@ static struct nand_ecclayout onenand_oob_128 = {
}
};
+/*
+ * onenand_oob_128 - oob info for OneNAND with 4KB page
+ *
+ * Based on specification:
+ * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
+ *
+ * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
+ *
+ * oobfree uses the spare area fields marked as
+ * "Managed by internal ECC logic for Logical Sector Number area"
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 119
+ },
+ .oobfree = {
+ {2, 3}, {18, 3}, {34, 3}, {50, 3},
+ {66, 3}, {82, 3}, {98, 3}, {114, 3}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -2424,7 +2453,7 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd,
len -= block_size;
addr += block_size;
- if (addr == region_end) {
+ if (region && addr == region_end) {
if (!len)
break;
region++;
@@ -4018,8 +4047,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
*/
switch (mtd->oobsize) {
case 128:
- this->ecclayout = &onenand_oob_128;
- mtd->subpage_sft = 0;
+ if (FLEXONENAND(this)) {
+ this->ecclayout = &flexonenand_oob_128;
+ mtd->subpage_sft = 0;
+ } else {
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 2;
+ }
break;
case 64:
this->ecclayout = &onenand_oob_64;
@@ -4108,12 +4142,8 @@ void onenand_release(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
-#ifdef CONFIG_MTD_PARTITIONS
/* Deregister partitions */
- del_mtd_partitions (mtd);
-#endif
- /* Deregister the device */
- del_mtd_device (mtd);
+ mtd_device_unregister(mtd);
/* Free bad block table memory, if allocated */
if (this->bbm) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index 5ef3bd547772..85399e3accda 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -539,7 +539,8 @@ static int __init onenand_sim_init(void)
return -ENXIO;
}
- add_mtd_partitions(&info->mtd, info->parts, ARRAY_SIZE(os_partitions));
+ mtd_device_register(&info->mtd, info->parts,
+ ARRAY_SIZE(os_partitions));
return 0;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index a4c74a9ba430..3306b5b3c736 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,9 +147,7 @@ struct s3c_onenand {
struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -159,9 +157,7 @@ struct s3c_onenand {
static struct s3c_onenand *onenand;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
static inline int s3c_read_reg(int offset)
{
@@ -1021,15 +1017,13 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
if (err > 0)
- add_mtd_partitions(mtd, onenand->parts, err);
+ mtd_device_register(mtd, onenand->parts, err);
else if (err <= 0 && pdata && pdata->parts)
- add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- err = add_mtd_device(mtd);
+ err = mtd_device_register(mtd, NULL, 0);
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 9aa81584c8a2..941bc3c05d6e 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -365,7 +365,7 @@ static int gluebi_create(struct ubi_device_info *di,
vi->vol_id);
mutex_unlock(&devices_mutex);
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
err_msg("cannot add MTD device");
kfree(mtd->name);
kfree(gluebi);
@@ -407,7 +407,7 @@ static int gluebi_remove(struct ubi_volume_info *vi)
return err;
mtd = &gluebi->mtd;
- err = del_mtd_device(mtd);
+ err = mtd_device_unregister(mtd);
if (err) {
err_msg("cannot remove fake MTD device %d, UBI device %d, "
"volume %d, error %d", mtd->index, gluebi->ubi_num,
@@ -524,7 +524,7 @@ static void __exit ubi_gluebi_exit(void)
int err;
struct mtd_info *mtd = &gluebi->mtd;
- err = del_mtd_device(mtd);
+ err = mtd_device_unregister(mtd);
if (err)
err_msg("error %d while removing gluebi MTD device %d, "
"UBI device %d, volume %d - ignoring", err,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6141667c5fb7..17b4dd94da90 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -113,9 +113,11 @@ MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)");
+MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
+ "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)");
+MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
+ "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
@@ -127,7 +129,7 @@ module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
"0 for off, 1 for on (default)");
module_param(mode, charp, 0);
-MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
+MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
"3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
"6 for balance-alb");
@@ -142,27 +144,35 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
"2 for only on active slave "
"failure");
module_param(lacp_rate, charp, 0);
-MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
- "(slow/fast)");
+MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
+ "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)");
+MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
+ "0 for stable (default), 1 for bandwidth, "
+ "2 for count");
module_param(xmit_hash_policy, charp, 0);
-MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
- ", 1 for layer 3+4");
+MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
+ "0 for layer 2 (default), 1 for layer 3+4, "
+ "2 for layer 2+3");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
-MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
+MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
+ "0 for none (default), 1 for active, "
+ "2 for backup, 3 for all");
module_param(fail_over_mac, charp, 0);
-MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow");
+MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
+ "the same MAC; 0 for none (default), "
+ "1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
- "by setting active flag for all slaves. "
+	" by setting active flag for all slaves; "
"0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
-MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure");
+MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
+ "link failure");
/*----------------------------- Global variables ----------------------------*/
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 807b6bb200eb..29a4f06fbfcf 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1772,7 +1772,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(emac_clk)) {
- printk(KERN_ERR "DaVinci EMAC: Failed to get EMAC clock\n");
+ dev_err(&pdev->dev, "failed to get EMAC clock\n");
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
@@ -1780,7 +1780,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
- printk(KERN_ERR "DaVinci EMAC: Error allocating net_device\n");
+ dev_err(&pdev->dev, "error allocating net_device\n");
clk_put(emac_clk);
return -ENOMEM;
}
@@ -1795,7 +1795,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
- printk(KERN_ERR "DaVinci EMAC: No platform data\n");
+ dev_err(&pdev->dev, "no platform data\n");
return -ENODEV;
}
@@ -1814,7 +1814,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* Get EMAC platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(emac_dev, "DaVinci EMAC: Error getting res\n");
+		dev_err(&pdev->dev, "error getting res\n");
rc = -ENOENT;
goto probe_quit;
}
@@ -1822,14 +1822,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = res->end - res->start + 1;
if (!request_mem_region(res->start, size, ndev->name)) {
- dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
+ dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
rc = -ENXIO;
goto probe_quit;
}
priv->remap_addr = ioremap(res->start, size);
if (!priv->remap_addr) {
- dev_err(emac_dev, "Unable to map IO\n");
+ dev_err(&pdev->dev, "unable to map IO\n");
rc = -ENOMEM;
release_mem_region(res->start, size);
goto probe_quit;
@@ -1863,7 +1863,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->dma = cpdma_ctlr_create(&dma_params);
if (!priv->dma) {
- dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
+ dev_err(&pdev->dev, "error initializing DMA\n");
rc = -ENOMEM;
goto no_dma;
}
@@ -1879,7 +1879,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
- dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n");
+ dev_err(&pdev->dev, "error getting irq res\n");
rc = -ENOENT;
goto no_irq_res;
}
@@ -1888,8 +1888,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(priv->mac_addr)) {
/* Use random MAC if none passed */
random_ether_addr(priv->mac_addr);
- printk(KERN_WARNING "%s: using random MAC addr: %pM\n",
- __func__, priv->mac_addr);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
}
ndev->netdev_ops = &emac_netdev_ops;
@@ -1902,7 +1902,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
- dev_err(emac_dev, "DaVinci EMAC: Error in register_netdev\n");
+ dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
goto netdev_reg_err;
}
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index e646bfce2d84..b6304486f244 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -216,7 +216,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
int rc;
for (;;) {
- rc = del_mtd_device(&part->mtd);
+ rc = mtd_device_unregister(&part->mtd);
if (rc != -EBUSY)
break;
ssleep(1);
@@ -268,7 +268,7 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
part->mtd.write = efx_mtd->ops->write;
part->mtd.sync = efx_mtd_sync;
- if (add_mtd_device(&part->mtd))
+ if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
}
@@ -280,7 +280,7 @@ fail:
--part;
efx_mtd_remove_partition(part);
}
- /* add_mtd_device() returns 1 if the MTD table is full */
+ /* mtd_device_register() returns 1 if the MTD table is full */
return -ENOMEM;
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8b63a691a9ed..65200af29c52 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -670,7 +670,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
- if (depth != 1 ||
+ if (depth != 1 || !data ||
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
return 0;
@@ -679,16 +679,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
+ strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
#endif
- strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
- pr_debug("Command line is: %s\n", cmd_line);
+ pr_debug("Command line is: %s\n", (char*)data);
/* break now */
return 1;
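The effect of threading the buffer through *data is that early_init_dt_scan_chosen() no longer writes into an architecture-private cmd_line global; each architecture hands in its own buffer when walking the flattened tree. A caller now looks roughly like this (a sketch only; boot_command_line is the generic kernel buffer, and the ARM device-tree code added elsewhere in this series does the equivalent):

#include <linux/init.h>
#include <linux/of_fdt.h>

static void __init example_scan_chosen(void)
{
	/* The command-line buffer to fill is handed in as the *data argument. */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
}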
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 4e70749f8d16..a8d5bb3cba89 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
#define EVENT_BUFFER_H
#include <linux/types.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
int alloc_event_buffer(void);
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f9bda64fcd1b..dccd8636095c 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -14,7 +14,7 @@
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
#include "oprof.h"
#include "event_buffer.h"
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index dc8c531ed276..e57b50b38565 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -210,6 +210,15 @@ config CHARGER_ISP1704
Say Y to enable support for USB Charger Detection with
ISP1707/ISP1704 USB transceivers.
+config CHARGER_MAX8903
+ tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
+ depends on GENERIC_HARDIRQS
+ help
+	  Say Y to enable support for the MAX8903 DC-DC charger, reported
+	  through the power supply class in sysfs.  The driver controls the
+	  charger-enable and current-limit pins from interrupt handlers,
+	  based on which charger connections (DC adapter or USB) are present.
+
config CHARGER_TWL4030
tristate "OMAP TWL4030 BCI charger driver"
depends on TWL4030_CORE
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 8224990b933d..009a90fa8ac9 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -33,5 +33,6 @@ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
+obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 59e68dbd028b..bb16f5b7e167 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -4,6 +4,7 @@
* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
* Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
*
@@ -76,7 +77,7 @@ struct bq27x00_reg_cache {
int time_to_empty_avg;
int time_to_full;
int charge_full;
- int charge_counter;
+ int cycle_count;
int capacity;
int flags;
@@ -115,7 +116,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_ENERGY_NOW,
};
@@ -267,7 +268,7 @@ static void bq27x00_update(struct bq27x00_device_info *di)
cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
cache.charge_full = bq27x00_battery_read_lmd(di);
- cache.charge_counter = bq27x00_battery_read_cyct(di);
+ cache.cycle_count = bq27x00_battery_read_cyct(di);
if (!is_bq27500)
cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
@@ -496,8 +497,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
ret = bq27x00_simple_value(di->charge_design_full, val);
break;
- case POWER_SUPPLY_PROP_CHARGE_COUNTER:
- ret = bq27x00_simple_value(di->cache.charge_counter, val);
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ ret = bq27x00_simple_value(di->cache.cycle_count, val);
break;
case POWER_SUPPLY_PROP_ENERGY_NOW:
ret = bq27x00_battery_energy(di, val);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index e534290f3256..f2c9cc33c0f9 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -86,7 +86,11 @@ static int rated_capacities[] = {
920, /* NEC */
1440, /* Samsung */
1440, /* BYD */
+#ifdef CONFIG_MACH_H4700
+ 1800, /* HP iPAQ hx4700 3.7V 1800mAh (359113-001) */
+#else
1440, /* Lishen */
+#endif
1440, /* NEC */
2880, /* Samsung */
2880, /* BYD */
@@ -186,7 +190,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
scale[0] = di->full_active_uAh;
for (i = 1; i < 5; i++)
- scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i];
+ scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 1 + i];
di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10);
di->full_active_uAh *= 1000; /* convert to µAh */
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 25b88ac1d44c..718f2c537827 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -161,12 +161,27 @@ static int __devexit gpio_charger_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int gpio_charger_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+
+ power_supply_changed(&gpio_charger->charger);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume);
+
static struct platform_driver gpio_charger_driver = {
.probe = gpio_charger_probe,
.remove = __devexit_p(gpio_charger_remove),
.driver = {
.name = "gpio-charger",
.owner = THIS_MODULE,
+ .pm = &gpio_charger_pm_ops,
},
};
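SIMPLE_DEV_PM_OPS() used above just fills in a struct dev_pm_ops with the system-sleep callbacks, and leaves it empty when CONFIG_PM_SLEEP is off; the line is approximately equivalent to the following sketch (not the literal macro expansion):

static const struct dev_pm_ops gpio_charger_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend	= NULL,
	.resume		= gpio_charger_resume,
	.freeze		= NULL,
	.thaw		= gpio_charger_resume,
	.poweroff	= NULL,
	.restore	= gpio_charger_resume,
#endif
};

Pointing .driver.pm at it means the supply change is announced after resume, thaw and restore alike.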
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 2ad9b14a5ce3..f6d72b402a8e 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -33,6 +33,7 @@
#include <linux/usb/ulpi.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/power/isp1704_charger.h>
/* Vendor specific Power Control register */
#define ISP1704_PWR_CTRL 0x3d
@@ -71,6 +72,18 @@ struct isp1704_charger {
};
/*
+ * Disable/enable the power from the isp1704 if a function for it
+ * has been provided with platform data.
+ */
+static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
+{
+ struct isp1704_charger_data *board = isp->dev->platform_data;
+
+	if (board && board->set_power)
+ board->set_power(on);
+}
+
+/*
* Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB
* chargers).
*
@@ -222,6 +235,9 @@ static void isp1704_charger_work(struct work_struct *data)
mutex_lock(&lock);
+ if (event != USB_EVENT_NONE)
+ isp1704_charger_set_power(isp, 1);
+
switch (event) {
case USB_EVENT_VBUS:
isp->online = true;
@@ -269,6 +285,8 @@ static void isp1704_charger_work(struct work_struct *data)
*/
if (isp->otg->gadget)
usb_gadget_disconnect(isp->otg->gadget);
+
+ isp1704_charger_set_power(isp, 0);
break;
case USB_EVENT_ENUMERATED:
if (isp->present)
@@ -394,6 +412,8 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
+ isp1704_charger_set_power(isp, 1);
+
ret = isp1704_test_ulpi(isp);
if (ret < 0)
goto fail1;
@@ -434,6 +454,7 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
/* Detect charger if VBUS is valid (the cable was already plugged). */
ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
+ isp1704_charger_set_power(isp, 0);
if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
isp->event = USB_EVENT_VBUS;
schedule_work(&isp->work);
@@ -459,6 +480,7 @@ static int __devexit isp1704_charger_remove(struct platform_device *pdev)
otg_unregister_notifier(isp->otg, &isp->nb);
power_supply_unregister(&isp->psy);
otg_put_transceiver(isp->otg);
+ isp1704_charger_set_power(isp, 0);
kfree(isp);
return 0;
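The new hook lives entirely in platform data; a board that wants the transceiver's supply gated would wire it up roughly like this (the GPIO number and helper are placeholders, only the set_power field comes from the code above):

#include <linux/gpio.h>
#include <linux/power/isp1704_charger.h>

#define EXAMPLE_ISP1704_POWER_GPIO	16	/* placeholder */

static void example_isp1704_set_power(bool on)
{
	/* e.g. a GPIO (or regulator) feeding the ISP1704 */
	gpio_set_value(EXAMPLE_ISP1704_POWER_GPIO, on);
}

static struct isp1704_charger_data example_isp1704_pdata = {
	.set_power	= example_isp1704_set_power,
};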
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
new file mode 100644
index 000000000000..33ff0e37809e
--- /dev/null
+++ b/drivers/power/max8903_charger.c
@@ -0,0 +1,391 @@
+/*
+ * max8903_charger.c - Maxim 8903 USB/Adapter Charger Driver
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/platform_device.h>
+#include <linux/power/max8903_charger.h>
+
+struct max8903_data {
+ struct max8903_pdata *pdata;
+ struct device *dev;
+ struct power_supply psy;
+ bool fault;
+ bool usb_in;
+ bool ta_in;
+};
+
+static enum power_supply_property max8903_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS, /* Charger status output */
+ POWER_SUPPLY_PROP_ONLINE, /* External power source */
+ POWER_SUPPLY_PROP_HEALTH, /* Fault or OK */
+};
+
+static int max8903_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max8903_data *data = container_of(psy,
+ struct max8903_data, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ if (data->pdata->chg) {
+ if (gpio_get_value(data->pdata->chg) == 0)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (data->usb_in || data->ta_in)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 0;
+ if (data->usb_in || data->ta_in)
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ if (data->fault)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static irqreturn_t max8903_dcin(int irq, void *_data)
+{
+ struct max8903_data *data = _data;
+ struct max8903_pdata *pdata = data->pdata;
+ bool ta_in;
+ enum power_supply_type old_type;
+
+ ta_in = gpio_get_value(pdata->dok) ? false : true;
+
+ if (ta_in == data->ta_in)
+ return IRQ_HANDLED;
+
+ data->ta_in = ta_in;
+
+ /* Set Current-Limit-Mode 1:DC 0:USB */
+ if (pdata->dcm)
+ gpio_set_value(pdata->dcm, ta_in ? 1 : 0);
+
+ /* Charger Enable / Disable (cen is negated) */
+ if (pdata->cen)
+ gpio_set_value(pdata->cen, ta_in ? 0 :
+ (data->usb_in ? 0 : 1));
+
+ dev_dbg(data->dev, "TA(DC-IN) Charger %s.\n", ta_in ?
+ "Connected" : "Disconnected");
+
+ old_type = data->psy.type;
+
+ if (data->ta_in)
+ data->psy.type = POWER_SUPPLY_TYPE_MAINS;
+ else if (data->usb_in)
+ data->psy.type = POWER_SUPPLY_TYPE_USB;
+ else
+ data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+
+ if (old_type != data->psy.type)
+ power_supply_changed(&data->psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max8903_usbin(int irq, void *_data)
+{
+ struct max8903_data *data = _data;
+ struct max8903_pdata *pdata = data->pdata;
+ bool usb_in;
+ enum power_supply_type old_type;
+
+ usb_in = gpio_get_value(pdata->uok) ? false : true;
+
+ if (usb_in == data->usb_in)
+ return IRQ_HANDLED;
+
+ data->usb_in = usb_in;
+
+ /* Do not touch Current-Limit-Mode */
+
+ /* Charger Enable / Disable (cen is negated) */
+ if (pdata->cen)
+ gpio_set_value(pdata->cen, usb_in ? 0 :
+ (data->ta_in ? 0 : 1));
+
+ dev_dbg(data->dev, "USB Charger %s.\n", usb_in ?
+ "Connected" : "Disconnected");
+
+ old_type = data->psy.type;
+
+ if (data->ta_in)
+ data->psy.type = POWER_SUPPLY_TYPE_MAINS;
+ else if (data->usb_in)
+ data->psy.type = POWER_SUPPLY_TYPE_USB;
+ else
+ data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+
+ if (old_type != data->psy.type)
+ power_supply_changed(&data->psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max8903_fault(int irq, void *_data)
+{
+ struct max8903_data *data = _data;
+ struct max8903_pdata *pdata = data->pdata;
+ bool fault;
+
+ fault = gpio_get_value(pdata->flt) ? false : true;
+
+ if (fault == data->fault)
+ return IRQ_HANDLED;
+
+ data->fault = fault;
+
+ if (fault)
+ dev_err(data->dev, "Charger suffers a fault and stops.\n");
+ else
+ dev_err(data->dev, "Charger recovered from a fault.\n");
+
+ return IRQ_HANDLED;
+}
+
+static __devinit int max8903_probe(struct platform_device *pdev)
+{
+ struct max8903_data *data;
+ struct device *dev = &pdev->dev;
+ struct max8903_pdata *pdata = pdev->dev.platform_data;
+ int ret = 0;
+ int gpio;
+ int ta_in = 0;
+ int usb_in = 0;
+
+ data = kzalloc(sizeof(struct max8903_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+ data->pdata = pdata;
+ data->dev = dev;
+ platform_set_drvdata(pdev, data);
+
+ if (pdata->dc_valid == false && pdata->usb_valid == false) {
+ dev_err(dev, "No valid power sources.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (pdata->dc_valid) {
+ if (pdata->dok && gpio_is_valid(pdata->dok) &&
+ pdata->dcm && gpio_is_valid(pdata->dcm)) {
+ gpio = pdata->dok; /* PULL_UPed Interrupt */
+ ta_in = gpio_get_value(gpio) ? 0 : 1;
+
+ gpio = pdata->dcm; /* Output */
+ gpio_set_value(gpio, ta_in);
+ } else {
+ dev_err(dev, "When DC is wired, DOK and DCM should"
+ " be wired as well.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ } else {
+ if (pdata->dcm) {
+ if (gpio_is_valid(pdata->dcm))
+ gpio_set_value(pdata->dcm, 0);
+ else {
+ dev_err(dev, "Invalid pin: dcm.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ }
+
+ if (pdata->usb_valid) {
+ if (pdata->uok && gpio_is_valid(pdata->uok)) {
+ gpio = pdata->uok;
+ usb_in = gpio_get_value(gpio) ? 0 : 1;
+ } else {
+			dev_err(dev, "When USB is wired, UOK should be wired "
+					"as well.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->cen) {
+ if (gpio_is_valid(pdata->cen)) {
+ gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
+ } else {
+ dev_err(dev, "Invalid pin: cen.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->chg) {
+ if (!gpio_is_valid(pdata->chg)) {
+ dev_err(dev, "Invalid pin: chg.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->flt) {
+ if (!gpio_is_valid(pdata->flt)) {
+ dev_err(dev, "Invalid pin: flt.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->usus) {
+ if (!gpio_is_valid(pdata->usus)) {
+ dev_err(dev, "Invalid pin: usus.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ data->fault = false;
+ data->ta_in = ta_in;
+ data->usb_in = usb_in;
+
+ data->psy.name = "max8903_charger";
+ data->psy.type = (ta_in) ? POWER_SUPPLY_TYPE_MAINS :
+ ((usb_in) ? POWER_SUPPLY_TYPE_USB :
+ POWER_SUPPLY_TYPE_BATTERY);
+ data->psy.get_property = max8903_get_property;
+ data->psy.properties = max8903_charger_props;
+ data->psy.num_properties = ARRAY_SIZE(max8903_charger_props);
+
+ ret = power_supply_register(dev, &data->psy);
+ if (ret) {
+ dev_err(dev, "failed: power supply register.\n");
+ goto err;
+ }
+
+ if (pdata->dc_valid) {
+ ret = request_threaded_irq(gpio_to_irq(pdata->dok),
+ NULL, max8903_dcin,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "MAX8903 DC IN", data);
+ if (ret) {
+ dev_err(dev, "Cannot request irq %d for DC (%d)\n",
+ gpio_to_irq(pdata->dok), ret);
+ goto err_psy;
+ }
+ }
+
+ if (pdata->usb_valid) {
+ ret = request_threaded_irq(gpio_to_irq(pdata->uok),
+ NULL, max8903_usbin,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "MAX8903 USB IN", data);
+ if (ret) {
+ dev_err(dev, "Cannot request irq %d for USB (%d)\n",
+ gpio_to_irq(pdata->uok), ret);
+ goto err_dc_irq;
+ }
+ }
+
+ if (pdata->flt) {
+ ret = request_threaded_irq(gpio_to_irq(pdata->flt),
+ NULL, max8903_fault,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "MAX8903 Fault", data);
+ if (ret) {
+ dev_err(dev, "Cannot request irq %d for Fault (%d)\n",
+ gpio_to_irq(pdata->flt), ret);
+ goto err_usb_irq;
+ }
+ }
+
+ return 0;
+
+err_usb_irq:
+ if (pdata->usb_valid)
+ free_irq(gpio_to_irq(pdata->uok), data);
+err_dc_irq:
+ if (pdata->dc_valid)
+ free_irq(gpio_to_irq(pdata->dok), data);
+err_psy:
+ power_supply_unregister(&data->psy);
+err:
+ kfree(data);
+ return ret;
+}
+
+static __devexit int max8903_remove(struct platform_device *pdev)
+{
+ struct max8903_data *data = platform_get_drvdata(pdev);
+
+ if (data) {
+ struct max8903_pdata *pdata = data->pdata;
+
+ if (pdata->flt)
+ free_irq(gpio_to_irq(pdata->flt), data);
+ if (pdata->usb_valid)
+ free_irq(gpio_to_irq(pdata->uok), data);
+ if (pdata->dc_valid)
+ free_irq(gpio_to_irq(pdata->dok), data);
+ power_supply_unregister(&data->psy);
+ kfree(data);
+ }
+
+ return 0;
+}
+
+static struct platform_driver max8903_driver = {
+ .probe = max8903_probe,
+ .remove = __devexit_p(max8903_remove),
+ .driver = {
+ .name = "max8903-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init max8903_init(void)
+{
+ return platform_driver_register(&max8903_driver);
+}
+module_init(max8903_init);
+
+static void __exit max8903_exit(void)
+{
+ platform_driver_unregister(&max8903_driver);
+}
+module_exit(max8903_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MAX8903 Charger Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("max8903-charger");
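The driver is configured purely through platform data; a board file might describe its wiring roughly as follows (GPIO numbers are placeholders; the field names are the ones the probe code above reads):

#include <linux/platform_device.h>
#include <linux/power/max8903_charger.h>

static struct max8903_pdata example_max8903_pdata = {
	.dok		= 42,	/* DC (adapter) OK input, pulled up */
	.uok		= 43,	/* USB OK input */
	.dcm		= 44,	/* current-limit mode output: 1 = DC, 0 = USB */
	.cen		= 45,	/* charger enable output, active low */
	.chg		= 46,	/* charging status input */
	.flt		= 47,	/* fault input */
	.dc_valid	= true,
	.usb_valid	= true,
};

static struct platform_device example_max8903_device = {
	.name	= "max8903-charger",
	.id	= -1,
	.dev	= {
		.platform_data = &example_max8903_pdata,
	},
};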
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
index 0cd9f67d33e5..b527c93bf2f3 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/test_power.c
@@ -3,6 +3,12 @@
*
* Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com>
*
+ * Dynamic module parameter code from the Virtual Battery Driver
+ * Copyright (C) 2008 Pylone, Inc.
+ * By: Masashi YOKOTA <yokota@pylone.jp>
+ * Originally found here:
+ * http://downloads.pylone.jp/src/virtual_battery/virtual_battery-0.0.1.tar.bz2
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -15,8 +21,12 @@
#include <linux/delay.h>
#include <linux/vermagic.h>
-static int test_power_ac_online = 1;
-static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING;
+static int ac_online = 1;
+static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+static int battery_health = POWER_SUPPLY_HEALTH_GOOD;
+static int battery_present = 1; /* true */
+static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
+static int battery_capacity = 50;
static int test_power_get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
@@ -24,7 +34,7 @@ static int test_power_get_ac_property(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = test_power_ac_online;
+ val->intval = ac_online;
break;
default:
return -EINVAL;
@@ -47,22 +57,30 @@ static int test_power_get_battery_property(struct power_supply *psy,
val->strval = UTS_RELEASE;
break;
case POWER_SUPPLY_PROP_STATUS:
- val->intval = test_power_battery_status;
+ val->intval = battery_status;
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
break;
case POWER_SUPPLY_PROP_HEALTH:
- val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ val->intval = battery_health;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = battery_present;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ val->intval = battery_technology;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = 50;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = battery_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = 100;
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
@@ -84,9 +102,11 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_EMPTY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -118,6 +138,7 @@ static struct power_supply test_power_supplies[] = {
},
};
+
static int __init test_power_init(void)
{
int i;
@@ -145,8 +166,8 @@ static void __exit test_power_exit(void)
int i;
/* Let's see how we handle changes... */
- test_power_ac_online = 0;
- test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ ac_online = 0;
+ battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
power_supply_changed(&test_power_supplies[i]);
pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n",
@@ -158,6 +179,241 @@ static void __exit test_power_exit(void)
}
module_exit(test_power_exit);
+
+
+#define MAX_KEYLENGTH 256
+struct battery_property_map {
+ int value;
+ char const *key;
+};
+
+static struct battery_property_map map_ac_online[] = {
+	{ 0, "off" },
+	{ 1, "on" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_status[] = {
+ { POWER_SUPPLY_STATUS_CHARGING, "charging" },
+ { POWER_SUPPLY_STATUS_DISCHARGING, "discharging" },
+ { POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" },
+ { POWER_SUPPLY_STATUS_FULL, "full" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_health[] = {
+ { POWER_SUPPLY_HEALTH_GOOD, "good" },
+ { POWER_SUPPLY_HEALTH_OVERHEAT, "overheat" },
+ { POWER_SUPPLY_HEALTH_DEAD, "dead" },
+ { POWER_SUPPLY_HEALTH_OVERVOLTAGE, "overvoltage" },
+ { POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, "failure" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_present[] = {
+ { 0, "false" },
+ { 1, "true" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_technology[] = {
+ { POWER_SUPPLY_TECHNOLOGY_NiMH, "NiMH" },
+ { POWER_SUPPLY_TECHNOLOGY_LION, "LION" },
+ { POWER_SUPPLY_TECHNOLOGY_LIPO, "LIPO" },
+ { POWER_SUPPLY_TECHNOLOGY_LiFe, "LiFe" },
+ { POWER_SUPPLY_TECHNOLOGY_NiCd, "NiCd" },
+ { POWER_SUPPLY_TECHNOLOGY_LiMn, "LiMn" },
+ { -1, NULL },
+};
+
+
+static int map_get_value(struct battery_property_map *map, const char *key,
+ int def_val)
+{
+ char buf[MAX_KEYLENGTH];
+ int cr;
+
+ strncpy(buf, key, MAX_KEYLENGTH);
+ buf[MAX_KEYLENGTH-1] = '\0';
+
+ cr = strnlen(buf, MAX_KEYLENGTH) - 1;
+ if (buf[cr] == '\n')
+ buf[cr] = '\0';
+
+ while (map->key) {
+ if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0)
+ return map->value;
+ map++;
+ }
+
+ return def_val;
+}
+
+
+static const char *map_get_key(struct battery_property_map *map, int value,
+ const char *def_key)
+{
+ while (map->key) {
+ if (map->value == value)
+ return map->key;
+ map++;
+ }
+
+ return def_key;
+}
+
+static int param_set_ac_online(const char *key, const struct kernel_param *kp)
+{
+ ac_online = map_get_value(map_ac_online, key, ac_online);
+ power_supply_changed(&test_power_supplies[0]);
+ return 0;
+}
+
+static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_status(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_status = map_get_value(map_status, key, battery_status);
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_health(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_health = map_get_value(map_health, key, battery_health);
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_present(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_present = map_get_value(map_present, key, battery_present);
+	power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+static int param_get_battery_present(char *buffer,
+ const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_technology(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_technology = map_get_value(map_technology, key,
+ battery_technology);
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+static int param_get_battery_technology(char *buffer,
+ const struct kernel_param *kp)
+{
+ strcpy(buffer,
+ map_get_key(map_technology, battery_technology, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_capacity(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_capacity = tmp;
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+#define param_get_battery_capacity param_get_int
+
+
+
+static struct kernel_param_ops param_ops_ac_online = {
+ .set = param_set_ac_online,
+ .get = param_get_ac_online,
+};
+
+static struct kernel_param_ops param_ops_battery_status = {
+ .set = param_set_battery_status,
+ .get = param_get_battery_status,
+};
+
+static struct kernel_param_ops param_ops_battery_present = {
+ .set = param_set_battery_present,
+ .get = param_get_battery_present,
+};
+
+static struct kernel_param_ops param_ops_battery_technology = {
+ .set = param_set_battery_technology,
+ .get = param_get_battery_technology,
+};
+
+static struct kernel_param_ops param_ops_battery_health = {
+ .set = param_set_battery_health,
+ .get = param_get_battery_health,
+};
+
+static struct kernel_param_ops param_ops_battery_capacity = {
+ .set = param_set_battery_capacity,
+ .get = param_get_battery_capacity,
+};
+
+
+#define param_check_ac_online(name, p) __param_check(name, p, void);
+#define param_check_battery_status(name, p) __param_check(name, p, void);
+#define param_check_battery_present(name, p) __param_check(name, p, void);
+#define param_check_battery_technology(name, p) __param_check(name, p, void);
+#define param_check_battery_health(name, p) __param_check(name, p, void);
+#define param_check_battery_capacity(name, p) __param_check(name, p, void);
+
+
+module_param(ac_online, ac_online, 0644);
+MODULE_PARM_DESC(ac_online, "AC charging state <on|off>");
+
+module_param(battery_status, battery_status, 0644);
+MODULE_PARM_DESC(battery_status,
+ "battery status <charging|discharging|not-charging|full>");
+
+module_param(battery_present, battery_present, 0644);
+MODULE_PARM_DESC(battery_present,
+ "battery presence state <good|overheat|dead|overvoltage|failure>");
+
+module_param(battery_technology, battery_technology, 0644);
+MODULE_PARM_DESC(battery_technology,
+ "battery technology <NiMH|LION|LIPO|LiFe|NiCd|LiMn>");
+
+module_param(battery_health, battery_health, 0644);
+MODULE_PARM_DESC(battery_health,
+ "battery health state <good|overheat|dead|overvoltage|failure>");
+
+module_param(battery_capacity, battery_capacity, 0644);
+MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
+
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index e5ced3a4c1ed..d119c38b3ff6 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -271,24 +271,33 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
+static int z2_batt_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct z2_charger *charger = i2c_get_clientdata(client);
flush_work_sync(&charger->bat_work);
return 0;
}
-static int z2_batt_resume(struct i2c_client *client)
+static int z2_batt_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct z2_charger *charger = i2c_get_clientdata(client);
schedule_work(&charger->bat_work);
return 0;
}
+
+static const struct dev_pm_ops z2_battery_pm_ops = {
+ .suspend = z2_batt_suspend,
+ .resume = z2_batt_resume,
+};
+
+#define Z2_BATTERY_PM_OPS (&z2_battery_pm_ops)
+
#else
-#define z2_batt_suspend NULL
-#define z2_batt_resume NULL
+#define Z2_BATTERY_PM_OPS (NULL)
#endif
static const struct i2c_device_id z2_batt_id[] = {
@@ -301,11 +310,10 @@ static struct i2c_driver z2_batt_driver = {
.driver = {
.name = "z2-battery",
.owner = THIS_MODULE,
+ .pm = Z2_BATTERY_PM_OPS
},
.probe = z2_batt_probe,
.remove = z2_batt_remove,
- .suspend = z2_batt_suspend,
- .resume = z2_batt_resume,
.id_table = z2_batt_id,
};
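The z2_battery change above is the standard conversion from legacy i2c-level suspend/resume hooks to dev_pm_ops on the driver core. When the callbacks are only needed for system sleep, the #ifdef plumbing can also be generated with SIMPLE_DEV_PM_OPS; a hedged sketch with an invented "foo" driver (not part of this patch):

	#include <linux/module.h>
	#include <linux/i2c.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the device here */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* restart the device here */
		return 0;
	}
	#endif

	/* Declares "foo_pm_ops"; the callbacks are only wired in when
	 * CONFIG_PM_SLEEP is enabled, so the user needs no #ifdef.
	 */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct i2c_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= &foo_pm_ops,
		},
		/* .probe, .remove, .id_table as usual */
	};

With SIMPLE_DEV_PM_OPS the explicit Z2_BATTERY_PM_OPS/#else plumbing used in the patch becomes unnecessary, at the cost of always defining a (possibly empty) dev_pm_ops structure.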
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f0b13a0d1851..d7ed20f293d7 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -297,5 +297,11 @@ config REGULATOR_TPS6524X
serial interface currently supported on the sequencer serial
port controller.
+config REGULATOR_TPS65910
+ tristate "TI TPS65910 Power Regulator"
+ depends on MFD_TPS65910
+ help
+ This driver supports TPS65910 voltage regulator chips.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 165ff5371e9e..3932d2ec38f3 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
+obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0fae51c4845a..d3e38790906e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -158,6 +158,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
struct regulator *regulator;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ /*
+ * Assume consumers that didn't say anything are OK
+ * with anything in the constraint range.
+ */
+ if (!regulator->min_uV && !regulator->max_uV)
+ continue;
+
if (*max_uV > regulator->max_uV)
*max_uV = regulator->max_uV;
if (*min_uV < regulator->min_uV)
@@ -197,9 +204,9 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
}
/* operating mode constraint check */
-static int regulator_check_mode(struct regulator_dev *rdev, int mode)
+static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
{
- switch (mode) {
+ switch (*mode) {
case REGULATOR_MODE_FAST:
case REGULATOR_MODE_NORMAL:
case REGULATOR_MODE_IDLE:
@@ -217,11 +224,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
rdev_err(rdev, "operation not allowed\n");
return -EPERM;
}
- if (!(rdev->constraints->valid_modes_mask & mode)) {
- rdev_err(rdev, "invalid mode %x\n", mode);
- return -EINVAL;
+
+ /* The modes are bitmasks; the most power-hungry modes have
+ * the lowest values. If the requested mode isn't supported,
+ * try higher modes. */
+ while (*mode) {
+ if (rdev->constraints->valid_modes_mask & *mode)
+ return 0;
+ *mode /= 2;
}
- return 0;
+
+ return -EINVAL;
}
/* dynamic regulator mode switching constraint check */
@@ -612,7 +625,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
output_uV, current_uA);
/* check the new mode is allowed */
- err = regulator_check_mode(rdev, mode);
+ err = regulator_mode_constrain(rdev, &mode);
if (err == 0)
rdev->desc->ops->set_mode(rdev, mode);
}
@@ -718,6 +731,10 @@ static void print_constraints(struct regulator_dev *rdev)
count += sprintf(buf + count, "at %d mV ", ret / 1000);
}
+ if (constraints->uV_offset)
+ count += sprintf(buf, "%dmV offset ",
+ constraints->uV_offset / 1000);
+
if (constraints->min_uA && constraints->max_uA) {
if (constraints->min_uA == constraints->max_uA)
count += sprintf(buf + count, "%d mA ",
@@ -1498,13 +1515,14 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
*/
int regulator_force_disable(struct regulator *regulator)
{
+ struct regulator_dev *rdev = regulator->rdev;
struct regulator_dev *supply_rdev = NULL;
int ret;
- mutex_lock(&regulator->rdev->mutex);
+ mutex_lock(&rdev->mutex);
regulator->uA_load = 0;
- ret = _regulator_force_disable(regulator->rdev, &supply_rdev);
- mutex_unlock(&regulator->rdev->mutex);
+ ret = _regulator_force_disable(rdev, &supply_rdev);
+ mutex_unlock(&rdev->mutex);
if (supply_rdev)
regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
@@ -1634,6 +1652,9 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
+ min_uV += rdev->constraints->uV_offset;
+ max_uV += rdev->constraints->uV_offset;
+
if (rdev->desc->ops->set_voltage) {
ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
&selector);
@@ -1858,18 +1879,22 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage);
static int _regulator_get_voltage(struct regulator_dev *rdev)
{
- int sel;
+ int sel, ret;
if (rdev->desc->ops->get_voltage_sel) {
sel = rdev->desc->ops->get_voltage_sel(rdev);
if (sel < 0)
return sel;
- return rdev->desc->ops->list_voltage(rdev, sel);
- }
- if (rdev->desc->ops->get_voltage)
- return rdev->desc->ops->get_voltage(rdev);
- else
+ ret = rdev->desc->ops->list_voltage(rdev, sel);
+ } else if (rdev->desc->ops->get_voltage) {
+ ret = rdev->desc->ops->get_voltage(rdev);
+ } else {
return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+ return ret - rdev->constraints->uV_offset;
}
/**
@@ -2005,7 +2030,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
}
/* constraints check */
- ret = regulator_check_mode(rdev, mode);
+ ret = regulator_mode_constrain(rdev, &mode);
if (ret < 0)
goto out;
@@ -2081,16 +2106,26 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
mutex_lock(&rdev->mutex);
+ /*
+ * First check whether we can set modes at all; otherwise just
+ * tell the consumer everything is OK.
+ */
regulator->uA_load = uA_load;
ret = regulator_check_drms(rdev);
- if (ret < 0)
+ if (ret < 0) {
+ ret = 0;
goto out;
- ret = -EINVAL;
+ }
- /* sanity check */
if (!rdev->desc->ops->get_optimum_mode)
goto out;
+ /*
+ * We can actually do this, so any errors are indicators of
+ * potential real failure.
+ */
+ ret = -EINVAL;
+
/* get output voltage */
output_uV = _regulator_get_voltage(rdev);
if (output_uV <= 0) {
@@ -2116,7 +2151,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
mode = rdev->desc->ops->get_optimum_mode(rdev,
input_uV, output_uV,
total_uA_load);
- ret = regulator_check_mode(rdev, mode);
+ ret = regulator_mode_constrain(rdev, &mode);
if (ret < 0) {
rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
total_uA_load, input_uV, output_uV);
@@ -2589,14 +2624,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (ret < 0)
goto scrub;
- /* set supply regulator if it exists */
- if (init_data->supply_regulator && init_data->supply_regulator_dev) {
- dev_err(dev,
- "Supply regulator specified by both name and dev\n");
- ret = -EINVAL;
- goto scrub;
- }
-
if (init_data->supply_regulator) {
struct regulator_dev *r;
int found = 0;
@@ -2621,14 +2648,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
goto scrub;
}
- if (init_data->supply_regulator_dev) {
- dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n");
- ret = set_supply(rdev,
- dev_get_drvdata(init_data->supply_regulator_dev));
- if (ret < 0)
- goto scrub;
- }
-
/* add consumers devices */
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
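The regulator_mode_constrain() hunk above depends on the REGULATOR_MODE_* values being single-bit masks ordered so that the most power-hungry mode has the lowest value (FAST 0x1, NORMAL 0x2, IDLE 0x4, STANDBY 0x8 in include/linux/regulator/consumer.h); halving an unsupported mode therefore falls back to a mode that can always deliver at least as much current. A small stand-alone sketch of that fallback loop (the userspace test program is illustrative only):

	#include <stdio.h>

	#define REGULATOR_MODE_FAST	0x1	/* most power hungry */
	#define REGULATOR_MODE_NORMAL	0x2
	#define REGULATOR_MODE_IDLE	0x4
	#define REGULATOR_MODE_STANDBY	0x8	/* least power hungry */

	/* Return the requested mode, or the nearest more capable mode the
	 * regulator supports; 0 means nothing suitable was found.
	 */
	static unsigned constrain_mode(unsigned valid_modes_mask, unsigned mode)
	{
		while (mode) {
			if (valid_modes_mask & mode)
				return mode;
			mode /= 2;	/* fall back to a more power-hungry mode */
		}
		return 0;
	}

	int main(void)
	{
		/* Regulator only supports FAST and NORMAL; consumer asks for IDLE. */
		unsigned valid = REGULATOR_MODE_FAST | REGULATOR_MODE_NORMAL;

		printf("constrained mode: %#x\n",
		       constrain_mode(valid, REGULATOR_MODE_IDLE));	/* prints 0x2 */
		return 0;
	}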
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 77e0cfb30b23..10d5a1d9768e 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -267,7 +267,6 @@ static int max8997_get_enable_register(struct regulator_dev *rdev,
default:
/* Not controllable or not exists */
return -EINVAL;
- break;
}
return 0;
@@ -1033,11 +1032,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
/* For the safety, set max voltage before setting up */
for (i = 0; i < 8; i++) {
- max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
max_buck1, 0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
max_buck2, 0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
max_buck5, 0x3f);
}
@@ -1114,13 +1113,13 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
/* Initialize all the DVS related BUCK registers */
for (i = 0; i < 8; i++) {
- max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
max8997->buck1_vol[i],
0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
max8997->buck2_vol[i],
0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
max8997->buck5_vol[i],
0x3f);
}
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index f57e9c42fdb4..41a1495eec2b 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -732,13 +732,15 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
if (!pdata->buck1_set1) {
printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set1);
- return -EIO;
+ ret = -EIO;
+ goto err_free_mem;
}
/* Check if SET2 is not equal to 0 */
if (!pdata->buck1_set2) {
printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set2);
- return -EIO;
+ ret = -EIO;
+ goto err_free_mem;
}
gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -758,7 +760,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* Set predefined value for BUCK1 register 2 */
i = 0;
@@ -770,7 +772,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[1] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* Set predefined value for BUCK1 register 3 */
i = 0;
@@ -782,7 +784,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[2] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* Set predefined value for BUCK1 register 4 */
i = 0;
@@ -794,7 +796,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[3] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
if (ret)
- return ret;
+ goto err_free_mem;
}
@@ -803,7 +805,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
if (!pdata->buck2_set3) {
printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck2_set3);
- return -EIO;
+ ret = -EIO;
+ goto err_free_mem;
}
gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
gpio_direction_output(pdata->buck2_set3,
@@ -818,7 +821,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck2_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* BUCK2 register 2 */
i = 0;
@@ -830,7 +833,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck2_vol[1] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
if (ret)
- return ret;
+ goto err_free_mem;
}
for (i = 0; i < pdata->num_regulators; i++) {
@@ -860,6 +863,7 @@ err:
if (rdev[i])
regulator_unregister(rdev[i]);
+err_free_mem:
kfree(max8998->rdev);
kfree(max8998);
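The max8998 hunks above all exist to route early probe failures through a single cleanup label instead of returning directly and leaking the allocations made at the top of probe. A hedged sketch of that single-exit pattern, with hypothetical foo_hw_init()/foo_register() helpers standing in for the steps that can fail:

	#include <linux/slab.h>
	#include <linux/platform_device.h>

	struct foo {
		int dummy;
	};

	/* Stand-ins for whatever setup steps can fail after the allocation. */
	static int foo_hw_init(struct foo *foo) { return 0; }
	static int foo_register(struct foo *foo) { return 0; }

	static int __devinit foo_probe(struct platform_device *pdev)
	{
		struct foo *foo;
		int ret;

		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		ret = foo_hw_init(foo);
		if (ret)
			goto err_free_mem;	/* never plain "return ret" past the alloc */

		ret = foo_register(foo);
		if (ret)
			goto err_free_mem;

		platform_set_drvdata(pdev, foo);
		return 0;

	err_free_mem:
		kfree(foo);
		return ret;
	}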
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1b8f7398a4a8..3285d41842f2 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -431,7 +431,8 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- int hi, value, val, mask, id = rdev_get_id(rdev);
+ int hi, value, mask, id = rdev_get_id(rdev);
+ u32 valread;
int ret;
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
@@ -447,15 +448,16 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_read(priv->mc13xxx,
- mc13892_regulators[id].vsel_reg, &val);
+ mc13892_regulators[id].vsel_reg, &valread);
if (ret)
goto err;
- hi = val & MC13892_SWITCHERS0_SWxHI;
- if (value > 1375)
+ if (value > 1375000)
hi = 1;
- if (value < 1100)
+ else if (value < 1100000)
hi = 0;
+ else
+ hi = valread & MC13892_SWITCHERS0_SWxHI;
if (hi) {
value = (value - 1100000) / 25000;
@@ -464,8 +466,10 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
value = (value - 600000) / 25000;
mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
- ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
- mask, value << mc13892_regulators[id].vsel_shift);
+ valread = (valread & ~mask) |
+ (value << mc13892_regulators[id].vsel_shift);
+ ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+ valread);
err:
mc13xxx_unlock(priv->mc13xxx);
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 2bb5de1f2421..bc27ab136378 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
- BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
+ BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index a4d7f4540c18..1011873896dc 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -158,6 +158,7 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
"failed to register regulator\n");
return ret;
}
+ platform_set_drvdata(pdev, tps6105x);
return 0;
}
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 60a7ca5409e9..fbddc15e1811 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -466,7 +466,6 @@ static struct regulator_ops tps65023_ldo_ops = {
static int __devinit tps_65023_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- static int desc_id;
const struct tps_info *info = (void *)id->driver_data;
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
@@ -499,7 +498,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
tps->info[i] = info;
tps->desc[i].name = info->name;
- tps->desc[i].id = desc_id++;
+ tps->desc[i].id = i;
tps->desc[i].n_voltages = num_voltages[i];
tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
&tps65023_ldo_ops : &tps65023_dcdc_ops);
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 064755290599..bfffabc21eda 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -553,7 +553,6 @@ static __devinit
int tps6507x_pmic_probe(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
- static int desc_id;
struct tps_info *info = &tps6507x_pmic_regs[0];
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
@@ -598,7 +597,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
}
tps->desc[i].name = info->name;
- tps->desc[i].id = desc_id++;
+ tps->desc[i].id = i;
tps->desc[i].n_voltages = num_voltages[i];
tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
&tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
new file mode 100644
index 000000000000..55dd4e6650db
--- /dev/null
+++ b/drivers/regulator/tps65910-regulator.c
@@ -0,0 +1,993 @@
+/*
+ * tps65910-regulator.c -- TI TPS65910/TPS65911 voltage regulators
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+#define TPS65910_REG_VRTC 0
+#define TPS65910_REG_VIO 1
+#define TPS65910_REG_VDD1 2
+#define TPS65910_REG_VDD2 3
+#define TPS65910_REG_VDD3 4
+#define TPS65910_REG_VDIG1 5
+#define TPS65910_REG_VDIG2 6
+#define TPS65910_REG_VPLL 7
+#define TPS65910_REG_VDAC 8
+#define TPS65910_REG_VAUX1 9
+#define TPS65910_REG_VAUX2 10
+#define TPS65910_REG_VAUX33 11
+#define TPS65910_REG_VMMC 12
+
+#define TPS65911_REG_VDDCTRL 4
+#define TPS65911_REG_LDO1 5
+#define TPS65911_REG_LDO2 6
+#define TPS65911_REG_LDO3 7
+#define TPS65911_REG_LDO4 8
+#define TPS65911_REG_LDO5 9
+#define TPS65911_REG_LDO6 10
+#define TPS65911_REG_LDO7 11
+#define TPS65911_REG_LDO8 12
+
+#define TPS65910_NUM_REGULATOR 13
+#define TPS65910_SUPPLY_STATE_ENABLED 0x1
+
+/* supported VIO voltages in millivolts */
+static const u16 VIO_VSEL_table[] = {
+ 1500, 1800, 2500, 3300,
+};
+
+/* VSEL tables for TPS65910-specific LDOs and DCDCs */
+
+/* supported VDD3 voltages in millivolts */
+static const u16 VDD3_VSEL_table[] = {
+ 5000,
+};
+
+/* supported VDIG1 voltages in millivolts */
+static const u16 VDIG1_VSEL_table[] = {
+ 1200, 1500, 1800, 2700,
+};
+
+/* supported VDIG2 voltages in millivolts */
+static const u16 VDIG2_VSEL_table[] = {
+ 1000, 1100, 1200, 1800,
+};
+
+/* supported VPLL voltages in millivolts */
+static const u16 VPLL_VSEL_table[] = {
+ 1000, 1100, 1800, 2500,
+};
+
+/* supported VDAC voltages in millivolts */
+static const u16 VDAC_VSEL_table[] = {
+ 1800, 2600, 2800, 2850,
+};
+
+/* supported VAUX1 voltages in millivolts */
+static const u16 VAUX1_VSEL_table[] = {
+ 1800, 2500, 2800, 2850,
+};
+
+/* supported VAUX2 voltages in millivolts */
+static const u16 VAUX2_VSEL_table[] = {
+ 1800, 2800, 2900, 3300,
+};
+
+/* supported VAUX33 voltages in millivolts */
+static const u16 VAUX33_VSEL_table[] = {
+ 1800, 2000, 2800, 3300,
+};
+
+/* supported VMMC voltages in millivolts */
+static const u16 VMMC_VSEL_table[] = {
+ 1800, 2800, 3000, 3300,
+};
+
+struct tps_info {
+ const char *name;
+ unsigned min_uV;
+ unsigned max_uV;
+ u8 table_len;
+ const u16 *table;
+};
+
+static struct tps_info tps65910_regs[] = {
+ {
+ .name = "VRTC",
+ },
+ {
+ .name = "VIO",
+ .min_uV = 1500000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VIO_VSEL_table),
+ .table = VIO_VSEL_table,
+ },
+ {
+ .name = "VDD1",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDD2",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDD3",
+ .min_uV = 5000000,
+ .max_uV = 5000000,
+ .table_len = ARRAY_SIZE(VDD3_VSEL_table),
+ .table = VDD3_VSEL_table,
+ },
+ {
+ .name = "VDIG1",
+ .min_uV = 1200000,
+ .max_uV = 2700000,
+ .table_len = ARRAY_SIZE(VDIG1_VSEL_table),
+ .table = VDIG1_VSEL_table,
+ },
+ {
+ .name = "VDIG2",
+ .min_uV = 1000000,
+ .max_uV = 1800000,
+ .table_len = ARRAY_SIZE(VDIG2_VSEL_table),
+ .table = VDIG2_VSEL_table,
+ },
+ {
+ .name = "VPLL",
+ .min_uV = 1000000,
+ .max_uV = 2500000,
+ .table_len = ARRAY_SIZE(VPLL_VSEL_table),
+ .table = VPLL_VSEL_table,
+ },
+ {
+ .name = "VDAC",
+ .min_uV = 1800000,
+ .max_uV = 2850000,
+ .table_len = ARRAY_SIZE(VDAC_VSEL_table),
+ .table = VDAC_VSEL_table,
+ },
+ {
+ .name = "VAUX1",
+ .min_uV = 1800000,
+ .max_uV = 2850000,
+ .table_len = ARRAY_SIZE(VAUX1_VSEL_table),
+ .table = VAUX1_VSEL_table,
+ },
+ {
+ .name = "VAUX2",
+ .min_uV = 1800000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VAUX2_VSEL_table),
+ .table = VAUX2_VSEL_table,
+ },
+ {
+ .name = "VAUX33",
+ .min_uV = 1800000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VAUX33_VSEL_table),
+ .table = VAUX33_VSEL_table,
+ },
+ {
+ .name = "VMMC",
+ .min_uV = 1800000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VMMC_VSEL_table),
+ .table = VMMC_VSEL_table,
+ },
+};
+
+static struct tps_info tps65911_regs[] = {
+ {
+ .name = "VIO",
+ .min_uV = 1500000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VIO_VSEL_table),
+ .table = VIO_VSEL_table,
+ },
+ {
+ .name = "VDD1",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDD2",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDDCTRL",
+ .min_uV = 600000,
+ .max_uV = 1400000,
+ },
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO3",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO4",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO5",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO6",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO7",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO8",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+};
+
+struct tps65910_reg {
+ struct regulator_desc desc[TPS65910_NUM_REGULATOR];
+ struct tps65910 *mfd;
+ struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
+ struct tps_info *info[TPS65910_NUM_REGULATOR];
+ struct mutex mutex;
+ int mode;
+ int (*get_ctrl_reg)(int);
+};
+
+static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
+{
+ u8 val;
+ int err;
+
+ err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+{
+ return pmic->mfd->write(pmic->mfd, reg, 1, &val);
+}
+
+static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
+ u8 set_mask, u8 clear_mask)
+{
+ int err, data;
+
+ mutex_lock(&pmic->mutex);
+
+ data = tps65910_read(pmic, reg);
+ if (data < 0) {
+ dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
+ err = data;
+ goto out;
+ }
+
+ data &= ~clear_mask;
+ data |= set_mask;
+ err = tps65910_write(pmic, reg, data);
+ if (err)
+ dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&pmic->mutex);
+ return err;
+}
+
+static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+{
+ int data;
+
+ mutex_lock(&pmic->mutex);
+
+ data = tps65910_read(pmic, reg);
+ if (data < 0)
+ dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&pmic->mutex);
+ return data;
+}
+
+static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&pmic->mutex);
+
+ err = tps65910_write(pmic, reg, val);
+ if (err < 0)
+ dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&pmic->mutex);
+ return err;
+}
+
+static int tps65910_get_ctrl_register(int id)
+{
+ switch (id) {
+ case TPS65910_REG_VRTC:
+ return TPS65910_VRTC;
+ case TPS65910_REG_VIO:
+ return TPS65910_VIO;
+ case TPS65910_REG_VDD1:
+ return TPS65910_VDD1;
+ case TPS65910_REG_VDD2:
+ return TPS65910_VDD2;
+ case TPS65910_REG_VDD3:
+ return TPS65910_VDD3;
+ case TPS65910_REG_VDIG1:
+ return TPS65910_VDIG1;
+ case TPS65910_REG_VDIG2:
+ return TPS65910_VDIG2;
+ case TPS65910_REG_VPLL:
+ return TPS65910_VPLL;
+ case TPS65910_REG_VDAC:
+ return TPS65910_VDAC;
+ case TPS65910_REG_VAUX1:
+ return TPS65910_VAUX1;
+ case TPS65910_REG_VAUX2:
+ return TPS65910_VAUX2;
+ case TPS65910_REG_VAUX33:
+ return TPS65910_VAUX33;
+ case TPS65910_REG_VMMC:
+ return TPS65910_VMMC;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tps65911_get_ctrl_register(int id)
+{
+ switch (id) {
+ case TPS65910_REG_VRTC:
+ return TPS65910_VRTC;
+ case TPS65910_REG_VIO:
+ return TPS65910_VIO;
+ case TPS65910_REG_VDD1:
+ return TPS65910_VDD1;
+ case TPS65910_REG_VDD2:
+ return TPS65910_VDD2;
+ case TPS65911_REG_VDDCTRL:
+ return TPS65911_VDDCTRL;
+ case TPS65911_REG_LDO1:
+ return TPS65911_LDO1;
+ case TPS65911_REG_LDO2:
+ return TPS65911_LDO2;
+ case TPS65911_REG_LDO3:
+ return TPS65911_LDO3;
+ case TPS65911_REG_LDO4:
+ return TPS65911_LDO4;
+ case TPS65911_REG_LDO5:
+ return TPS65911_LDO5;
+ case TPS65911_REG_LDO6:
+ return TPS65911_LDO6;
+ case TPS65911_REG_LDO7:
+ return TPS65911_LDO7;
+ case TPS65911_REG_LDO8:
+ return TPS65911_LDO8;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tps65910_is_enabled(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, value, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65910_reg_read(pmic, reg);
+ if (value < 0)
+ return value;
+
+ return value & TPS65910_SUPPLY_STATE_ENABLED;
+}
+
+static int tps65910_enable(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65910 *mfd = pmic->mfd;
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65910_set_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
+}
+
+static int tps65910_disable(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65910 *mfd = pmic->mfd;
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
+}
+
+
+static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65910 *mfd = pmic->mfd;
+ int reg, value, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT,
+ LDO_ST_MODE_BIT);
+ case REGULATOR_MODE_IDLE:
+ value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
+ return tps65910_set_bits(mfd, reg, value);
+ case REGULATOR_MODE_STANDBY:
+ return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+ }
+
+ return -EINVAL;
+}
+
+static unsigned int tps65910_get_mode(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, value, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65910_reg_read(pmic, reg);
+ if (value < 0)
+ return value;
+
+ if (value & LDO_ST_ON_BIT)
+ return REGULATOR_MODE_STANDBY;
+ else if (value & LDO_ST_MODE_BIT)
+ return REGULATOR_MODE_IDLE;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int tps65910_get_voltage_dcdc(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev), voltage = 0;
+ int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
+
+ switch (id) {
+ case TPS65910_REG_VDD1:
+ opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
+ mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+ mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
+ srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+ sr = opvsel & VDD1_OP_CMD_MASK;
+ opvsel &= VDD1_OP_SEL_MASK;
+ srvsel &= VDD1_SR_SEL_MASK;
+ vselmax = 75;
+ break;
+ case TPS65910_REG_VDD2:
+ opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
+ mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+ mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
+ srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+ sr = opvsel & VDD2_OP_CMD_MASK;
+ opvsel &= VDD2_OP_SEL_MASK;
+ srvsel &= VDD2_SR_SEL_MASK;
+ vselmax = 75;
+ break;
+ case TPS65911_REG_VDDCTRL:
+ opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
+ srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+ sr = opvsel & VDDCTRL_OP_CMD_MASK;
+ opvsel &= VDDCTRL_OP_SEL_MASK;
+ srvsel &= VDDCTRL_SR_SEL_MASK;
+ vselmax = 64;
+ break;
+ }
+
+ /* a gain-select value of 0 behaves like 1 (x1); 2 and 3 are used as-is */
+ if (!mult)
+ mult = 1;
+
+ if (sr) {
+ /* normalise to valid range */
+ if (srvsel < 3)
+ srvsel = 3;
+ if (srvsel > vselmax)
+ srvsel = vselmax;
+ srvsel -= 3;
+
+ voltage = (srvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+ } else {
+
+ /* normalise to valid range */
+ if (opvsel < 3)
+ opvsel = 3;
+ if (opvsel > vselmax)
+ opvsel = vselmax;
+ opvsel -= 3;
+
+ voltage = (opvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+ }
+
+ voltage *= mult;
+
+ return voltage;
+}
+
+static int tps65910_get_voltage(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, value, id = rdev_get_id(dev), voltage = 0;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65910_reg_read(pmic, reg);
+ if (value < 0)
+ return value;
+
+ switch (id) {
+ case TPS65910_REG_VIO:
+ case TPS65910_REG_VDIG1:
+ case TPS65910_REG_VDIG2:
+ case TPS65910_REG_VPLL:
+ case TPS65910_REG_VDAC:
+ case TPS65910_REG_VAUX1:
+ case TPS65910_REG_VAUX2:
+ case TPS65910_REG_VAUX33:
+ case TPS65910_REG_VMMC:
+ value &= LDO_SEL_MASK;
+ value >>= LDO_SEL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ voltage = pmic->info[id]->table[value] * 1000;
+
+ return voltage;
+}
+
+static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
+{
+ return 5 * 1000 * 1000;
+}
+
+static int tps65911_get_voltage(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int step_mv, id = rdev_get_id(dev);
+ u8 value, reg;
+
+ reg = pmic->get_ctrl_reg(id);
+
+ value = tps65910_reg_read(pmic, reg);
+
+ switch (id) {
+ case TPS65911_REG_LDO1:
+ case TPS65911_REG_LDO2:
+ case TPS65911_REG_LDO4:
+ value &= LDO1_SEL_MASK;
+ value >>= LDO_SEL_SHIFT;
+ /* The first 5 values of the selector correspond to 1V */
+ if (value < 5)
+ value = 0;
+ else
+ value -= 4;
+
+ step_mv = 50;
+ break;
+ case TPS65911_REG_LDO3:
+ case TPS65911_REG_LDO5:
+ case TPS65911_REG_LDO6:
+ case TPS65911_REG_LDO7:
+ case TPS65911_REG_LDO8:
+ value &= LDO3_SEL_MASK;
+ value >>= LDO_SEL_SHIFT;
+ /* The first 3 values of the selector correspond to 1V */
+ if (value < 3)
+ value = 0;
+ else
+ value -= 2;
+
+ step_mv = 100;
+ break;
+ case TPS65910_REG_VIO:
+ return pmic->info[id]->table[value] * 1000;
+ default:
+ return -EINVAL;
+ }
+
+ return (LDO_MIN_VOLT + value * step_mv) * 1000;
+}
+
+static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev), vsel;
+ int dcdc_mult = 0;
+
+ switch (id) {
+ case TPS65910_REG_VDD1:
+ dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ if (dcdc_mult == 1)
+ dcdc_mult--;
+ vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+
+ tps65910_modify_bits(pmic, TPS65910_VDD1,
+ (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
+ VDD1_VGAIN_SEL_MASK);
+ tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+ break;
+ case TPS65910_REG_VDD2:
+ dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ if (dcdc_mult == 1)
+ dcdc_mult--;
+ vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+
+ tps65910_modify_bits(pmic, TPS65910_VDD2,
+ (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
+ VDD1_VGAIN_SEL_MASK);
+ tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+ break;
+ case TPS65911_REG_VDDCTRL:
+ vsel = selector;
+ tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+ }
+
+ return 0;
+}
+
+static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ switch (id) {
+ case TPS65910_REG_VIO:
+ case TPS65910_REG_VDIG1:
+ case TPS65910_REG_VDIG2:
+ case TPS65910_REG_VPLL:
+ case TPS65910_REG_VDAC:
+ case TPS65910_REG_VAUX1:
+ case TPS65910_REG_VAUX2:
+ case TPS65910_REG_VAUX33:
+ case TPS65910_REG_VMMC:
+ return tps65910_modify_bits(pmic, reg,
+ (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
+ }
+
+ return -EINVAL;
+}
+
+static int tps65911_set_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ switch (id) {
+ case TPS65911_REG_LDO1:
+ case TPS65911_REG_LDO2:
+ case TPS65911_REG_LDO4:
+ return tps65910_modify_bits(pmic, reg,
+ (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK);
+ case TPS65911_REG_LDO3:
+ case TPS65911_REG_LDO5:
+ case TPS65911_REG_LDO6:
+ case TPS65911_REG_LDO7:
+ case TPS65911_REG_LDO8:
+ case TPS65910_REG_VIO:
+ return tps65910_modify_bits(pmic, reg,
+ (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
+ }
+
+ return -EINVAL;
+}
+
+
+static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ int volt, mult = 1, id = rdev_get_id(dev);
+
+ switch (id) {
+ case TPS65910_REG_VDD1:
+ case TPS65910_REG_VDD2:
+ mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ volt = VDD1_2_MIN_VOLT +
+ (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+ break;
+ case TPS65911_REG_VDDCTRL:
+ volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
+ break;
+ }
+
+ return volt * 100 * mult;
+}
+
+static int tps65910_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev), voltage;
+
+ if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
+ return -EINVAL;
+
+ if (selector >= pmic->info[id]->table_len)
+ return -EINVAL;
+ else
+ voltage = pmic->info[id]->table[selector] * 1000;
+
+ return voltage;
+}
+
+static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int step_mv = 0, id = rdev_get_id(dev);
+
+ switch (id) {
+ case TPS65911_REG_LDO1:
+ case TPS65911_REG_LDO2:
+ case TPS65911_REG_LDO4:
+ /* The first 5 values of the selector correspond to 1V */
+ if (selector < 5)
+ selector = 0;
+ else
+ selector -= 4;
+
+ step_mv = 50;
+ break;
+ case TPS65911_REG_LDO3:
+ case TPS65911_REG_LDO5:
+ case TPS65911_REG_LDO6:
+ case TPS65911_REG_LDO7:
+ case TPS65911_REG_LDO8:
+ /* The first 3 values of the selector correspond to 1V */
+ if (selector < 3)
+ selector = 0;
+ else
+ selector -= 2;
+
+ step_mv = 100;
+ break;
+ case TPS65910_REG_VIO:
+ return pmic->info[id]->table[selector] * 1000;
+ default:
+ return -EINVAL;
+ }
+
+ return (LDO_MIN_VOLT + selector * step_mv) * 1000;
+}
+
+/* Regulator ops (except VRTC) */
+static struct regulator_ops tps65910_ops_dcdc = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65910_get_voltage_dcdc,
+ .set_voltage_sel = tps65910_set_voltage_dcdc,
+ .list_voltage = tps65910_list_voltage_dcdc,
+};
+
+static struct regulator_ops tps65910_ops_vdd3 = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65910_get_voltage_vdd3,
+ .list_voltage = tps65910_list_voltage,
+};
+
+static struct regulator_ops tps65910_ops = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65910_get_voltage,
+ .set_voltage_sel = tps65910_set_voltage,
+ .list_voltage = tps65910_list_voltage,
+};
+
+static struct regulator_ops tps65911_ops = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65911_get_voltage,
+ .set_voltage_sel = tps65911_set_voltage,
+ .list_voltage = tps65911_list_voltage,
+};
+
+static __devinit int tps65910_probe(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+ struct tps_info *info;
+ struct regulator_init_data *reg_data;
+ struct regulator_dev *rdev;
+ struct tps65910_reg *pmic;
+ struct tps65910_board *pmic_plat_data;
+ int i, err;
+
+ pmic_plat_data = dev_get_platdata(tps65910->dev);
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+ reg_data = pmic_plat_data->tps65910_pmic_init_data;
+
+ pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ mutex_init(&pmic->mutex);
+ pmic->mfd = tps65910;
+ platform_set_drvdata(pdev, pmic);
+
+ /* Give control of all registers to the control port */
+ tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ DEVCTRL_SR_CTL_I2C_SEL_MASK);
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
+ info = tps65910_regs;
+ break;
+ case TPS65911:
+ pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
+ info = tps65911_regs;
+ break;
+ default:
+ pr_err("Invalid tps chip version\n");
+ kfree(pmic);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
+ /* Register the regulators */
+ pmic->info[i] = info;
+
+ pmic->desc[i].name = info->name;
+ pmic->desc[i].id = i;
+ pmic->desc[i].n_voltages = info->table_len;
+
+ if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
+ pmic->desc[i].ops = &tps65910_ops_dcdc;
+ } else if (i == TPS65910_REG_VDD3) {
+ if (tps65910_chip_id(tps65910) == TPS65910)
+ pmic->desc[i].ops = &tps65910_ops_vdd3;
+ else
+ pmic->desc[i].ops = &tps65910_ops_dcdc;
+ } else {
+ if (tps65910_chip_id(tps65910) == TPS65910)
+ pmic->desc[i].ops = &tps65910_ops;
+ else
+ pmic->desc[i].ops = &tps65911_ops;
+ }
+
+ pmic->desc[i].type = REGULATOR_VOLTAGE;
+ pmic->desc[i].owner = THIS_MODULE;
+
+ rdev = regulator_register(&pmic->desc[i],
+ tps65910->dev, reg_data, pmic);
+ if (IS_ERR(rdev)) {
+ dev_err(tps65910->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+
+ /* Save regulator for cleanup */
+ pmic->rdev[i] = rdev;
+ }
+ return 0;
+
+err:
+ while (--i >= 0)
+ regulator_unregister(pmic->rdev[i]);
+
+ kfree(pmic);
+ return err;
+}
+
+static int __devexit tps65910_remove(struct platform_device *pdev)
+{
+ struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
+ regulator_unregister(tps65910_reg->rdev[i]);
+
+ kfree(tps65910_reg);
+ return 0;
+}
+
+static struct platform_driver tps65910_driver = {
+ .driver = {
+ .name = "tps65910-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65910_probe,
+ .remove = __devexit_p(tps65910_remove),
+};
+
+static int __init tps65910_init(void)
+{
+ return platform_driver_register(&tps65910_driver);
+}
+subsys_initcall(tps65910_init);
+
+static void __exit tps65910_cleanup(void)
+{
+ platform_driver_unregister(&tps65910_driver);
+}
+module_exit(tps65910_cleanup);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-pmic");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 6a292852a358..87fe0f75a56e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -51,8 +51,13 @@ struct twlreg_info {
u16 min_mV;
u16 max_mV;
+ u8 flags;
+
/* used by regulator core */
struct regulator_desc desc;
+
+ /* chip specific features */
+ unsigned long features;
};
@@ -70,12 +75,35 @@ struct twlreg_info {
#define VREG_TRANS 1
#define VREG_STATE 2
#define VREG_VOLTAGE 3
+#define VREG_VOLTAGE_SMPS 4
/* TWL6030 Misc register offsets */
#define VREG_BC_ALL 1
#define VREG_BC_REF 2
#define VREG_BC_PROC 3
#define VREG_BC_CLK_RST 4
+/* TWL6030 LDO register values for CFG_STATE */
+#define TWL6030_CFG_STATE_OFF 0x00
+#define TWL6030_CFG_STATE_ON 0x01
+#define TWL6030_CFG_STATE_OFF2 0x02
+#define TWL6030_CFG_STATE_SLEEP 0x03
+#define TWL6030_CFG_STATE_GRP_SHIFT 5
+#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+ TWL6030_CFG_STATE_APP_SHIFT)
+
+/* Flags for SMPS Voltage reading */
+#define SMPS_OFFSET_EN BIT(0)
+#define SMPS_EXTENDED_EN BIT(1)
+
+/* twl6025 SMPS EPROM values */
+#define TWL6030_SMPS_OFFSET 0xB0
+#define TWL6030_SMPS_MULT 0xB3
+#define SMPS_MULTOFFSET_SMPS4 BIT(0)
+#define SMPS_MULTOFFSET_VIO BIT(1)
+#define SMPS_MULTOFFSET_SMPS3 BIT(6)
+
static inline int
twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
{
@@ -118,21 +146,38 @@ static int twlreg_grp(struct regulator_dev *rdev)
#define P2_GRP_6030 BIT(1) /* "peripherals" */
#define P1_GRP_6030 BIT(0) /* CPU/Linux */
-static int twlreg_is_enabled(struct regulator_dev *rdev)
+static int twl4030reg_is_enabled(struct regulator_dev *rdev)
{
int state = twlreg_grp(rdev);
if (state < 0)
return state;
- if (twl_class_is_4030())
- state &= P1_GRP_4030;
+ return state & P1_GRP_4030;
+}
+
+static int twl6030reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0, val;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+ if (grp < 0)
+ return grp;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp &= P1_GRP_6030;
else
- state &= P1_GRP_6030;
- return state;
+ grp = 1;
+
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ val = TWL6030_CFG_STATE_APP(val);
+
+ return grp && (val == TWL6030_CFG_STATE_ON);
}
-static int twlreg_enable(struct regulator_dev *rdev)
+static int twl4030reg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
@@ -142,10 +187,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
if (grp < 0)
return grp;
- if (twl_class_is_4030())
- grp |= P1_GRP_4030;
- else
- grp |= P1_GRP_6030;
+ grp |= P1_GRP_4030;
ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
@@ -154,29 +196,63 @@ static int twlreg_enable(struct regulator_dev *rdev)
return ret;
}
-static int twlreg_disable(struct regulator_dev *rdev)
+static int twl6030reg_enable(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int ret;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+ if (grp < 0)
+ return grp;
+
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ grp << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_ON);
+
+ udelay(info->delay);
+
+ return ret;
+}
+
+static int twl4030reg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
+ int ret;
grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
if (grp < 0)
return grp;
- if (twl_class_is_4030())
- grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
- else
- grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
+ grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+
+ return ret;
}
-static int twlreg_get_status(struct regulator_dev *rdev)
+static int twl6030reg_disable(struct regulator_dev *rdev)
{
- int state = twlreg_grp(rdev);
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int ret;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;
+
+ /* For 6030, set the off state for all enabled groups */
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ (grp) << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_OFF);
+
+ return ret;
+}
- if (twl_class_is_6030())
- return 0; /* FIXME return for 6030 regulator */
+static int twl4030reg_get_status(struct regulator_dev *rdev)
+{
+ int state = twlreg_grp(rdev);
if (state < 0)
return state;
@@ -190,15 +266,39 @@ static int twlreg_get_status(struct regulator_dev *rdev)
: REGULATOR_STATUS_STANDBY;
}
-static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
+static int twl6030reg_get_status(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val = twlreg_grp(rdev);
+ if (val < 0)
+ return val;
+
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+
+ switch (TWL6030_CFG_STATE_APP(val)) {
+ case TWL6030_CFG_STATE_ON:
+ return REGULATOR_STATUS_NORMAL;
+
+ case TWL6030_CFG_STATE_SLEEP:
+ return REGULATOR_STATUS_STANDBY;
+
+ case TWL6030_CFG_STATE_OFF:
+ case TWL6030_CFG_STATE_OFF2:
+ default:
+ break;
+ }
+
+ return REGULATOR_STATUS_OFF;
+}
+
+static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
unsigned message;
int status;
- if (twl_class_is_6030())
- return 0; /* FIXME return for 6030 regulator */
-
/* We can only set the mode through state machine commands... */
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -227,6 +327,36 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
}
+static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int val;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+
+ if (grp < 0)
+ return grp;
+
+ /* Compose the state register settings */
+ val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
+ /* We can only set the mode through state machine commands... */
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val |= TWL6030_CFG_STATE_ON;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val |= TWL6030_CFG_STATE_SLEEP;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
+}
+
/*----------------------------------------------------------------------*/
/*
@@ -375,13 +505,13 @@ static struct regulator_ops twl4030ldo_ops = {
.set_voltage = twl4030ldo_set_voltage,
.get_voltage = twl4030ldo_get_voltage,
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
+ .enable = twl4030reg_enable,
+ .disable = twl4030reg_disable,
+ .is_enabled = twl4030reg_is_enabled,
- .set_mode = twlreg_set_mode,
+ .set_mode = twl4030reg_set_mode,
- .get_status = twlreg_get_status,
+ .get_status = twl4030reg_get_status,
};
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
@@ -433,13 +563,13 @@ static struct regulator_ops twl6030ldo_ops = {
.set_voltage = twl6030ldo_set_voltage,
.get_voltage = twl6030ldo_get_voltage,
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
- .set_mode = twlreg_set_mode,
+ .set_mode = twl6030reg_set_mode,
- .get_status = twlreg_get_status,
+ .get_status = twl6030reg_get_status,
};
/*----------------------------------------------------------------------*/
@@ -461,25 +591,242 @@ static int twlfixed_get_voltage(struct regulator_dev *rdev)
return info->min_mV * 1000;
}
-static struct regulator_ops twlfixed_ops = {
+static struct regulator_ops twl4030fixed_ops = {
+ .list_voltage = twlfixed_list_voltage,
+
+ .get_voltage = twlfixed_get_voltage,
+
+ .enable = twl4030reg_enable,
+ .disable = twl4030reg_disable,
+ .is_enabled = twl4030reg_is_enabled,
+
+ .set_mode = twl4030reg_set_mode,
+
+ .get_status = twl4030reg_get_status,
+};
+
+static struct regulator_ops twl6030fixed_ops = {
.list_voltage = twlfixed_list_voltage,
.get_voltage = twlfixed_get_voltage,
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
- .set_mode = twlreg_set_mode,
+ .set_mode = twl6030reg_set_mode,
- .get_status = twlreg_get_status,
+ .get_status = twl6030reg_get_status,
};
static struct regulator_ops twl6030_fixed_resource = {
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
- .get_status = twlreg_get_status,
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
+ .get_status = twl6030reg_get_status,
+};
+
+/*
+ * SMPS status and control
+ */
+
+static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ int voltage = 0;
+
+ switch (info->flags) {
+ case SMPS_OFFSET_EN:
+ voltage = 100000;
+ /* fall through */
+ case 0:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 1350 * 1000;
+ break;
+ case 59:
+ voltage = 1500 * 1000;
+ break;
+ case 60:
+ voltage = 1800 * 1000;
+ break;
+ case 61:
+ voltage = 1900 * 1000;
+ break;
+ case 62:
+ voltage = 2100 * 1000;
+ break;
+ default:
+ voltage += (600000 + (12500 * (index - 1)));
+ }
+ break;
+ case SMPS_EXTENDED_EN:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 2084 * 1000;
+ break;
+ case 59:
+ voltage = 2315 * 1000;
+ break;
+ case 60:
+ voltage = 2778 * 1000;
+ break;
+ case 61:
+ voltage = 2932 * 1000;
+ break;
+ case 62:
+ voltage = 3241 * 1000;
+ break;
+ default:
+ voltage = (1852000 + (38600 * (index - 1)));
+ }
+ break;
+ case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 4167 * 1000;
+ break;
+ case 59:
+ voltage = 2315 * 1000;
+ break;
+ case 60:
+ voltage = 2778 * 1000;
+ break;
+ case 61:
+ voltage = 2932 * 1000;
+ break;
+ case 62:
+ voltage = 3241 * 1000;
+ break;
+ default:
+ voltage = (2161000 + (38600 * (index - 1)));
+ }
+ break;
+ }
+
+ return voltage;
+}
+
+static int
+twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = 0;
+
+ switch (info->flags) {
+ case 0:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 600000) && (max_uV <= 1300000)) {
+ vsel = (min_uV - 600000) / 125;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ /* Values 1..57 for vsel are linear and can be calculated;
+ * values 58..62 are non-linear.
+ */
+ else if ((min_uV > 1900000) && (max_uV >= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (max_uV >= 1900000))
+ vsel = 61;
+ else if ((min_uV > 1500000) && (max_uV >= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (max_uV >= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (max_uV >= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+ case SMPS_OFFSET_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 700000) && (max_uV <= 1420000)) {
+ vsel = (min_uV - 700000) / 125;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ /* Values 1..57 for vsel are linear and can be calculated;
+ * values 58..62 are non-linear.
+ */
+ else if ((min_uV > 1900000) && (max_uV >= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (max_uV >= 1900000))
+ vsel = 61;
+ else if ((min_uV > 1350000) && (max_uV >= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (max_uV >= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (max_uV >= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+ case SMPS_EXTENDED_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
+ vsel = (min_uV - 1852000) / 386;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ break;
+ case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
+ vsel = (min_uV - 1852000) / 386;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ break;
+ }
+
+ *selector = vsel;
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
+ vsel);
+}
+
+static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
+}
+
+static struct regulator_ops twlsmps_ops = {
+ .list_voltage = twl6030smps_list_voltage,
+
+ .set_voltage = twl6030smps_set_voltage,
+ .get_voltage_sel = twl6030smps_get_voltage_sel,
+
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
+
+ .set_mode = twl6030reg_set_mode,
+
+ .get_status = twl6030reg_get_status,
};
/*----------------------------------------------------------------------*/
@@ -487,11 +834,10 @@ static struct regulator_ops twl6030_fixed_resource = {
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf, TWL4030)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf) \
+ remap_conf, TWL4030, twl4030fixed_ops)
+#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf, TWL6030)
+ 0x0, TWL6030, twl6030fixed_ops)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
.base = offset, \
@@ -510,13 +856,11 @@ static struct regulator_ops twl6030_fixed_resource = {
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \
- remap_conf) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
.base = offset, \
.id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
- .remap = remap_conf, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
@@ -527,9 +871,23 @@ static struct regulator_ops twl6030_fixed_resource = {
}, \
}
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+ .base = offset, \
+ .id = num, \
+ .min_mV = min_mVolts, \
+ .max_mV = max_mVolts, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6025_REG_##label, \
+ .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \
+ .ops = &twl6030ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
- family) { \
+ family, operations) { \
.base = offset, \
.id = num, \
.min_mV = mVolts, \
@@ -539,17 +897,16 @@ static struct regulator_ops twl6030_fixed_resource = {
.name = #label, \
.id = family##_REG_##label, \
.n_voltages = 1, \
- .ops = &twlfixed_ops, \
+ .ops = &operations, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
.base = offset, \
.id = num, \
.delay = turnon_delay, \
- .remap = remap_conf, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
@@ -559,6 +916,21 @@ static struct regulator_ops twl6030_fixed_resource = {
}, \
}
+#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+ .base = offset, \
+ .id = num, \
+ .min_mV = 600, \
+ .max_mV = 2100, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6025_REG_##label, \
+ .n_voltages = 63, \
+ .ops = &twlsmps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
/*
* We list regulators here if systems need some level of
* software control over them after boot.
@@ -589,19 +961,52 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21),
- TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+
+ /* 6025 are renamed compared to 6030 versions */
+ TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
+ TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
+ TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
+ TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
+ TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
+ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
+ TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
+ TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
+ TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
+
+ TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
+ TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
+ TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
};
+static u8 twl_get_smps_offset(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_OFFSET);
+ return value;
+}
+
+static u8 twl_get_smps_mult(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_MULT);
+ return value;
+}
+
static int __devinit twlreg_probe(struct platform_device *pdev)
{
int i;
@@ -623,6 +1028,9 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
if (!initdata)
return -EINVAL;
+ /* copy the features into regulator data */
+ info->features = (unsigned long)initdata->driver_data;
+
/* Constrain board-specific capabilities according to what
* this driver and the chip itself can actually do.
*/
@@ -645,6 +1053,27 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
break;
}
+ switch (pdev->id) {
+ case TWL6025_REG_SMPS3:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ case TWL6025_REG_SMPS4:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ case TWL6025_REG_VIO:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ }
+
rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "can't register %s, %ld\n",
@@ -653,7 +1082,8 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rdev);
- twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
+ if (twl_class_is_4030())
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
info->remap);
/* NOTE: many regulators support short-circuit IRQs (presentable
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index e93453b1b978..a0982e809851 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -600,7 +600,6 @@ err:
static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
@@ -776,7 +775,6 @@ err:
static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index b42d01cef35a..0f12c70bebc9 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -55,7 +55,7 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
return 1600000 + ((selector - 14) * 100000);
}
-static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
+static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev)
{
struct wm8400 *wm8400 = rdev_get_drvdata(dev);
u16 val;
@@ -63,7 +63,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
val &= WM8400_LDO1_VSEL_MASK;
- return wm8400_ldo_list_voltage(dev, val);
+ return val;
}
static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
@@ -104,7 +104,7 @@ static struct regulator_ops wm8400_ldo_ops = {
.enable = wm8400_ldo_enable,
.disable = wm8400_ldo_disable,
.list_voltage = wm8400_ldo_list_voltage,
- .get_voltage = wm8400_ldo_get_voltage,
+ .get_voltage_sel = wm8400_ldo_get_voltage_sel,
.set_voltage = wm8400_ldo_set_voltage,
};
@@ -145,7 +145,7 @@ static int wm8400_dcdc_list_voltage(struct regulator_dev *dev,
return 850000 + (selector * 25000);
}
-static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
+static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
struct wm8400 *wm8400 = rdev_get_drvdata(dev);
u16 val;
@@ -154,7 +154,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset);
val &= WM8400_DC1_VSEL_MASK;
- return 850000 + (25000 * val);
+ return val;
}
static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
@@ -261,7 +261,7 @@ static struct regulator_ops wm8400_dcdc_ops = {
.enable = wm8400_dcdc_enable,
.disable = wm8400_dcdc_disable,
.list_voltage = wm8400_dcdc_list_voltage,
- .get_voltage = wm8400_dcdc_get_voltage,
+ .get_voltage_sel = wm8400_dcdc_get_voltage_sel,
.set_voltage = wm8400_dcdc_set_voltage,
.get_mode = wm8400_dcdc_get_mode,
.set_mode = wm8400_dcdc_set_mode,
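With this change the driver reports the raw VSEL field and leaves the microvolt conversion to the regulator core, which maps a selector through list_voltage(). A stand-alone sketch of that mapping, using the DCDC formula from wm8400_dcdc_list_voltage() in this hunk (the wrapper function and the sample selector are illustrative only):

#include <stdio.h>

/* Same formula as wm8400_dcdc_list_voltage() above. */
static int wm8400_dcdc_selector_to_uV(unsigned int selector)
{
	return 850000 + (selector * 25000);
}

int main(void)
{
	unsigned int vsel = 12;		/* as if read from WM8400_DC1_VSEL_MASK */

	/* 850000 + 12 * 25000 = 1150000 uV */
	printf("%d uV\n", wm8400_dcdc_selector_to_uV(vsel));
	return 0;
}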
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 85dddb1e4126..46784b83c5c4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,7 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/vtoc.h>
#include <asm/diag.h>
@@ -642,7 +642,7 @@ dasd_diag_init(void)
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
register_external_interrupt(0x2603, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
@@ -652,7 +652,7 @@ static void __exit
dasd_diag_cleanup(void)
{
unregister_external_interrupt(0x2603, dasd_ext_handler);
- ctl_clear_bit(0, 9);
+ service_subclass_irq_unregister();
dasd_diag_discipline_pointer = NULL;
}
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index b76c61f82485..eaa7e78186f9 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -19,7 +19,6 @@
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
-#include <asm/s390_ext.h>
#include <asm/types.h>
#include <asm/irq.h>
@@ -885,12 +884,12 @@ sclp_check_interface(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_clear_bit(0,9);
+ service_subclass_irq_unregister();
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1070,7 +1069,7 @@ sclp_init(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
sclp_init_mask(1);
return 0;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 607998f0b7d8..aec60d55b10d 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -25,7 +25,6 @@
#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>
#include <asm/setup.h>
-#include <asm/s390_ext.h>
#include <asm/irq.h>
#define VIRTIO_SUBCODE_64 0x0D00
@@ -441,7 +440,7 @@ static int __init kvm_devices_init(void)
INIT_WORK(&hotplug_work, hotplug_devices);
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
register_external_interrupt(0x2603, kvm_extint_handler);
scan_devices();
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d2dab4..d5ff142c93a2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
flash_error_table[i].reason);
}
-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
asd_show_update_bios, asd_store_update_bios);
static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c49196f..6c7e0339dda4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08b0e7f..c85182a704fb 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
bfa_boolean_t msix);
void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc);
void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713185b6..89ae4c8f95a2 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+ hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
}
/*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+ return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129ddfcd..93612520f0d2 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
writel(1, ioc->ioc_regs.ioc_sem_reg);
}
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return BFA_TRUE;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+
/*
* Synchronized IOC failure processing routines
*/
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd59023227b..6bdd25a93db9 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
#define BD_SPLIT_SIZE 32768
/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
-#define BNX2I_SQ_WQES_MIN 16
-#define BNX2I_570X_SQ_WQES_MAX 128
-#define BNX2I_5770X_SQ_WQES_MAX 512
-#define BNX2I_570X_SQ_WQES_DEFAULT 128
-#define BNX2I_5770X_SQ_WQES_DEFAULT 256
+#define BNX2I_SQ_WQES_MIN 16
+#define BNX2I_570X_SQ_WQES_MAX 128
+#define BNX2I_5770X_SQ_WQES_MAX 512
+#define BNX2I_570X_SQ_WQES_DEFAULT 128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 128
#define BNX2I_570X_CQ_WQES_MAX 128
#define BNX2I_5770X_CQ_WQES_MAX 512
@@ -115,6 +115,7 @@
#define BNX2X_MAX_CQS 8
#define CNIC_ARM_CQE 1
+#define CNIC_ARM_CQE_FP 2
#define CNIC_DISARM_CQE 0
#define REG_RD(__hba, offset) \
@@ -666,7 +667,9 @@ enum {
* after HBA reset is completed by bnx2i/cnic/bnx2
* modules
* @state: tracks offload connection state machine
- * @teardown_mode: indicates if conn teardown is abortive or orderly
+ * @timestamp: tracks the start time when the ep begins to connect
+ * @num_active_cmds: tracks the number of outstanding commands for this ep
+ * @ec_shift: the amount of shift applied in the event coalescing calculation
* @qp: QP information
* @ids: contains chip allocated *context id* & driver assigned
* *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
u32 state;
unsigned long timestamp;
int num_active_cmds;
+ u32 ec_shift;
struct qp_info qp;
struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b89513faed..5c54a2d9b834 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
u16 next_index;
u32 num_active_cmds;
-
/* Coalesce CQ entries only on 10G devices */
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
* interrupts and other unwanted results
*/
cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
- if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
- return;
- if (action == CNIC_ARM_CQE) {
+ if (action != CNIC_ARM_CQE_FP)
+ if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+ return;
+
+ if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
num_active_cmds = ep->num_active_cmds;
if (num_active_cmds <= event_coal_min)
next_index = 1;
else
next_index = event_coal_min +
- (num_active_cmds - event_coal_min) / event_coal_div;
+ ((num_active_cmds - event_coal_min) >>
+ ep->ec_shift);
if (!next_index)
next_index = 1;
cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
iscsi_init.dummy_buffer_addr_hi =
(u32) ((u64) hba->dummy_buf_dma >> 32);
+ hba->num_ccell = hba->max_sqes >> 1;
hba->ctx_ccell_tasks =
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
qp->cq_cons_idx++;
}
}
- bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
}
/**
@@ -1948,22 +1950,23 @@ cqe_out:
static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
struct iscsi_kcqe *new_cqe_kcqe)
{
- struct bnx2i_conn *conn;
+ struct bnx2i_conn *bnx2i_conn;
u32 iscsi_cid;
iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
- conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
- if (!conn) {
+ if (!bnx2i_conn) {
printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
return;
}
- if (!conn->ep) {
+ if (!bnx2i_conn->ep) {
printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
return;
}
-
- bnx2i_process_new_cqes(conn);
+ bnx2i_process_new_cqes(bnx2i_conn);
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
+ bnx2i_process_new_cqes(bnx2i_conn);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a2819736..6adbdc34a9a5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
wait_event_interruptible_timeout(hba->eh_wait,
(list_empty(&hba->ep_ofld_list) &&
list_empty(&hba->ep_destroy_list)),
- 10 * HZ);
+ 2 * HZ);
/* Wait for all endpoints to be torn down; the chip will be reset once
 * control returns to the network driver. So it is required to clean up and
 * release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9ccc4ce..041928b23cb0 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
struct iscsi_endpoint *ep;
struct bnx2i_endpoint *bnx2i_ep;
+ u32 ec_div;
ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
bnx2i_ep->ep_iscsi_cid = (u16) -1;
bnx2i_ep->hba = hba;
bnx2i_ep->hba_age = hba->age;
+
+ ec_div = event_coal_div;
+ while (ec_div >>= 1)
+ bnx2i_ep->ec_shift += 1;
+
hba->ofld_conns_active++;
init_waitqueue_head(&bnx2i_ep->ofld_wait);
return ep;
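The loop above derives ec_shift as floor(log2(event_coal_div)), so the division in the old coalescing formula becomes a right shift in bnx2i_arm_cq_event_coalescing(). A small user-space sketch of the arithmetic follows; the parameter values are made up for illustration, only the formulas come from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int event_coal_div = 4;	/* module parameter (example value) */
	unsigned int event_coal_min = 24;	/* module parameter (example value) */
	unsigned int num_active_cmds = 100;
	unsigned int ec_shift = 0, ec_div = event_coal_div, next_index;

	while (ec_div >>= 1)			/* same loop as bnx2i_alloc_ep() */
		ec_shift++;

	if (num_active_cmds <= event_coal_min)
		next_index = 1;
	else
		next_index = event_coal_min +
			((num_active_cmds - event_coal_min) >> ec_shift);
	if (!next_index)
		next_index = 1;

	/* 24 + (100 - 24) / 4 = 43 */
	printf("ec_shift=%u next_index=%u\n", ec_shift, next_index);
	return 0;
}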
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
mutex_init(&hba->net_dev_lock);
init_waitqueue_head(&hba->eh_wait);
if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
- hba->hba_shutdown_tmo = 20 * HZ;
+ hba->hba_shutdown_tmo = 30 * HZ;
hba->conn_teardown_tmo = 20 * HZ;
hba->conn_ctx_destroy_tmo = 6 * HZ;
} else { /* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
struct bnx2i_cmd *cmd = task->dd_data;
struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+ if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+ return -ENOMEM;
+
/*
* If there is no scsi_cmnd this must be a mgmt task
*/
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = 1024,
.max_sectors = 127,
- .cmd_per_lun = 32,
+ .cmd_per_lun = 24,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9480b2..155d7b9bdeae 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static int fcoe_validate_vport_create(struct fc_vport *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
+ int rc;
+ char buf[32];
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ printk(KERN_ERR "fcoe: Failed to create vport, "
+ "WWPN (0x%s) already exists\n",
+ buf);
+ return rc;
+ }
mutex_lock(&fcoe_config_mutex);
vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
}
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ * - WWPN supplied is unique for given lport
+ *
+ */
+static int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+ int rc = 0;
+ char buf[32];
+
+ mutex_lock(&n_port->lp_mutex);
+
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ /* Make sure the WWPN is not the same as that of the lport */
+ if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport WWPN 0x%s is same as that of the "
+ "base port WWPN\n", buf);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check if there is any existing vport with same wwpn */
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport with given WWPN 0x%s already "
+ "exists\n", buf);
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ mutex_unlock(&n_port->lp_mutex);
+
+ return rc;
+}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd78fb4..c4a93993c0cf 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
+static inline void wwn_to_str(u64 wwn, char *buf, int len)
+{
+ u8 wwpn[8];
+
+ u64_to_wwn(wwn, wwpn);
+ snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+ wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+ wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+
#endif /* _FCOE_H_ */
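wwn_to_str() above renders a 64-bit WWN as sixteen lower-case hex digits, most significant byte first. A stand-alone sketch of the same formatting; u64_to_wwn() is replaced by explicit shifts so the example builds in user space, and the WWN value is made up:

#include <stdio.h>
#include <stdint.h>

static void wwn_to_str_example(uint64_t wwn, char *buf, int len)
{
	uint8_t wwpn[8];
	int i;

	for (i = 0; i < 8; i++)			/* equivalent of u64_to_wwn() */
		wwpn[i] = (wwn >> (56 - 8 * i)) & 0xff;

	snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
		 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
		 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
}

int main(void)
{
	char buf[32];

	wwn_to_str_example(0x10000000c9a1b2c3ULL, buf, sizeof(buf));
	printf("%s\n", buf);			/* prints 10000000c9a1b2c3 */
	return 0;
}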
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af5508a..c74c4b8e71ef 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
struct fc_lport *lport = fip->lp;
struct fc_lport *vn_port = NULL;
u32 desc_mask;
- int is_vn_port = 0;
+ int num_vlink_desc;
+ int reset_phys_port = 0;
+ struct fip_vn_desc **vlink_desc_arr = NULL;
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* mask of required descriptors. Validating each one clears its bit.
*/
- desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
desc = (struct fip_desc *)(fh + 1);
+
+ /*
+ * Actually need to subtract 'sizeof(*mp) + sizeof(*wp)' from 'rlen'
+ * before determining the max Vx_Port descriptor count, but a buggy FCF
+ * could have omitted either or both MAC Address and Name Identifier descriptors
+ */
+ num_vlink_desc = rlen / sizeof(*vp);
+ if (num_vlink_desc)
+ vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
+ GFP_ATOMIC);
+ if (!vlink_desc_arr)
+ return;
+ num_vlink_desc = 0;
+
while (rlen >= sizeof(*desc)) {
dlen = desc->fip_dlen * FIP_BPW;
if (dlen > rlen)
- return;
+ goto err;
/* Drop CVL if there are duplicate critical descriptors */
if ((desc->fip_dtype < 32) &&
+ (desc->fip_dtype != FIP_DT_VN_ID) &&
!(desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
"Descriptors in FIP CVL\n");
- return;
+ goto err;
}
switch (desc->fip_dtype) {
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
if (dlen < sizeof(*mp))
- return;
+ goto err;
if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_MAC);
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
if (dlen < sizeof(*wp))
- return;
+ goto err;
if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_NAME);
break;
case FIP_DT_VN_ID:
vp = (struct fip_vn_desc *)desc;
if (dlen < sizeof(*vp))
- return;
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(lport)) == 0 &&
- get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
- ntoh24(vp->fd_fc_id) == lport->port_id) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- break;
+ goto err;
+ vlink_desc_arr[num_vlink_desc++] = vp;
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (vn_port && (vn_port == lport)) {
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
}
- /* check if clr_vlink is for NPIV port */
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vn_port, &lport->vports, list) {
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(vn_port)) == 0 &&
- (get_unaligned_be64(&vp->fd_wwpn)
- == vn_port->wwpn) &&
- (ntoh24(vp->fd_fc_id) ==
- fc_host_port_id(vn_port->host))) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- is_vn_port = 1;
- break;
- }
- }
- mutex_unlock(&lport->lp_mutex);
-
break;
default:
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
- return;
+ goto err;
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* reset only if all required descriptors were present and valid.
*/
- if (desc_mask) {
+ if (desc_mask)
LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
desc_mask);
+ else if (!num_vlink_desc) {
+ LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
+ /*
+ * No Vx_Port descriptors. Clear all NPIV ports,
+ * followed by the physical port
+ */
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vn_port, &lport->vports, list)
+ fc_lport_reset(vn_port);
+ mutex_unlock(&lport->lp_mutex);
+
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
} else {
- LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ int i;
- if (is_vn_port)
- fc_lport_reset(vn_port);
- else {
- mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->dev_stats,
- get_cpu())->VLinkFailureCount++;
- put_cpu();
- fcoe_ctlr_reset(fip);
- mutex_unlock(&fip->ctlr_mutex);
+ LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ for (i = 0; i < num_vlink_desc; i++) {
+ vp = vlink_desc_arr[i];
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (!vn_port)
+ continue;
+
+ /*
+ * 'port_id' is already validated; check MAC address and
+ * wwpn
+ */
+ if (compare_ether_addr(fip->get_src_addr(vn_port),
+ vp->fd_mac) != 0 ||
+ get_unaligned_be64(&vp->fd_wwpn) !=
+ vn_port->wwpn)
+ continue;
+
+ if (vn_port == lport)
+ /*
+ * Physical port, defer processing till all
+ * listed NPIV ports are cleared
+ */
+ reset_phys_port = 1;
+ else /* NPIV port */
+ fc_lport_reset(vn_port);
+ }
+ if (reset_phys_port) {
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
}
}
+
+err:
+ kfree(vlink_desc_arr);
}
/**
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c8569e..41068e8748e7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
struct fcoe_transport *ft = NULL;
enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca46110..888086c4e709 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
+ num_hrrq = 0;
do {
writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
- if (int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
- }
-
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
+ } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
+ int_reg & IPR_PCII_HRRQ_UPDATED) {
+ ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return IRQ_HANDLED;
} else
break;
}
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b2736cafa..b9cb8140b398 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
default:
FC_DISC_DBG(disc, "Received an unsupported request, "
"the opcode is (%x)\n", op);
+ fc_frame_free(fp);
break;
}
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a746f60..3b8a6451ea28 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
spin_lock_bh(&ep->ex_lock);
+ resp = ep->resp;
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
spin_unlock_bh(&ep->ex_lock);
return sp;
err:
+ fc_fcp_ddp_done(fr_fsp(fp));
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a4720a771..9cd2149519ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
* DDP related resources for a fcp_pkt
* @fsp: The FCP packet that DDP had been used on
*/
-static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
struct fc_lport *lport;
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
error = lport->tt.seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
- fc_fcp_retry_cmd(fsp);
- return 0;
+ return error;
}
fp = NULL;
}
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
FC_FCTL_REQ, 0);
rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
+ seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
fsp, jiffies_to_msecs(rec_tov));
if (!seq)
goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
return;
}
- fsp->recov_seq = NULL;
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
}
fc_fcp_unlock_pkt(fsp);
- fsp->lp->tt.exch_done(seq);
out:
+ fsp->lp->tt.exch_done(seq);
fc_frame_free(fp);
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
}
/**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
if (fc_fcp_lock_pkt(fsp))
goto out;
- fsp->lp->tt.exch_done(fsp->recov_seq);
- fsp->recov_seq = NULL;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
}
fc_fcp_unlock_pkt(fsp);
out:
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
+ fsp->lp->tt.exch_done(fsp->recov_seq);
}
/**
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819d70c0..c7d071289af5 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
/*
* Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f4d831..db9238f2ecb8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
struct sas_ha_struct *sas_ha;
enum ata_completion_errors ac;
unsigned long flags;
+ struct ata_link *link;
if (!qc)
goto qc_already_gone;
dev = qc->ap->private_data;
sas_ha = dev->port->ha;
+ link = &dev->sata_dev.ap->link;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+
+ if (!link->sactive) {
+ qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ } else {
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
SAS_DPRINTK("%s: SAS error %x\n", __func__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
- qc->err_mask = ac;
+ if (!link->sactive) {
+ qc->err_mask = ac;
+ } else {
+ link->eh_info.err_mask |= AC_ERR_DEV;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.tf.feature = 0x04; /* status err */
dev->sata_dev.tf.command = ATA_ERR;
}
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
return ret;
}
+static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+ int res = TMF_RESP_FUNC_FAILED;
+ int ret = 0;
+
+ if (i->dft->lldd_ata_soft_reset)
+ res = i->dft->lldd_ata_soft_reset(dev);
+
+ if (res != TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
+ ret = -EAGAIN;
+ }
+
+ switch (dev->sata_dev.command_set) {
+ case ATA_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATA device.\n", __func__);
+ *class = ATA_DEV_ATA;
+ break;
+ case ATAPI_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
+ *class = ATA_DEV_ATAPI;
+ break;
+ default:
+ SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+ __func__, dev->sata_dev.command_set);
+ *class = ATA_DEV_UNKNOWN;
+ break;
+ }
+
+ ap->cbl = ATA_CBL_SATA;
+ return ret;
+}
+
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
- .softreset = NULL,
+ .softreset = sas_ata_soft_reset,
.hardreset = sas_ata_hard_reset,
.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd1ff2b..14e21b5fb8ba 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
-void sas_deform_port(struct asd_sas_phy *phy);
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
void sas_porte_bytes_dmaed(struct work_struct *work);
void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b635b1..e0f5018e9071 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
&phy->phy_events_pending);
phy->error = 0;
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
&phy->phy_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdfe699a..42fd1f25b664 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (port) {
if (!phy_is_wideport_member(port, phy))
- sas_deform_port(phy);
+ sas_deform_port(phy, 0);
else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
* This is called when the physical link to the other phy has been
* lost (on this phy), in Event thread context. We cannot delay here.
*/
-void sas_deform_port(struct asd_sas_phy *phy)
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
{
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
+ struct domain_device *dev;
unsigned long flags;
if (!port)
return; /* done by a phy event */
- if (port->port_dev)
- port->port_dev->pathways--;
+ dev = port->port_dev;
+ if (dev)
+ dev->pathways--;
if (port->num_phys == 1) {
+ if (dev && gone)
+ dev->gone = 1;
sas_unregister_domain_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else
sas_port_delete_phy(port->port, phy->phy);
-
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
/* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
for (i = 0; i < sas_ha->num_phys; i++)
if (sas_ha->sas_phy[i]->port)
- sas_deform_port(sas_ha->sas_phy[i]);
+ sas_deform_port(sas_ha->sas_phy[i], 0);
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f40917..eeba76cdf774 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
+ /* If the device fell off, no sense in issuing commands */
+ if (dev->gone) {
+ cmd->result = DID_BAD_TARGET << 16;
+ scsi_done(cmd);
+ goto out;
+ }
+
if (dev_is_sata(dev)) {
unsigned long flags;
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
goto out;
}
- /* If the device fell off, no sense in issuing commands */
- if (dev->gone) {
- cmd->result = DID_BAD_TARGET << 16;
- scsi_done(cmd);
- goto out;
- }
-
res = -ENOMEM;
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d89534f..8ec2c86a49d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
(1 << LPFC_USER_LINK_SPEED_AUTO))
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+enum nemb_type {
+ nemb_mse = 1,
+ nemb_hbd
+};
+
+enum mbox_type {
+ mbox_rd = 1,
+ mbox_wr
+};
+
+enum dma_type {
+ dma_mbox = 1,
+ dma_ebuf
+};
+
+enum sta_type {
+ sta_pre_addr = 1,
+ sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+ uint32_t state;
+#define LPFC_BSG_MBOX_IDLE 0
+#define LPFC_BSG_MBOX_HOST 1
+#define LPFC_BSG_MBOX_PORT 2
+#define LPFC_BSG_MBOX_DONE 3
+#define LPFC_BSG_MBOX_ABTS 4
+ enum nemb_type nembType;
+ enum mbox_type mboxType;
+ uint32_t numBuf;
+ uint32_t mbxTag;
+ uint32_t seqNum;
+ struct lpfc_dmabuf *mbx_dmabuf;
+ struct list_head ext_dmabuf_list;
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
MAILBOX_t *mbox;
uint32_t *mbox_ext;
+ struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
uint32_t ha_copy;
struct _PCB *pcb;
struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
+ uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
int brd_no; /* FC board number */
-
char SerialNumber[32]; /* adapter Serial Number */
char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
char ModelDesc[256]; /* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
uint16_t vpi_base;
uint16_t vfi_base;
unsigned long *vpi_bmask; /* vpi allocation table */
+ uint16_t *vpi_ids;
+ uint16_t vpi_count;
+ struct list_head lpfc_vpi_blk_list;
/* Data structure used by fabric iocb scheduler */
struct list_head fabric_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8fff673..135a53baa735 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register access
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+ struct completion online_compl;
+ uint32_t reg_val;
+ int status = 0;
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EIO;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+ if (status != 0)
+ return status;
+
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
+
+ reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+
+ if (opcode == LPFC_FW_DUMP)
+ reg_val |= LPFC_FW_DUMP_REQUEST;
+ else if (opcode == LPFC_FW_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+ else if (opcode == LPFC_DV_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+ writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+ /* flush */
+ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* delay driver action following IF_TYPE_2 reset */
+ msleep(100);
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* lpfc_nport_evt_cnt_show - Return the number of nport events
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+ else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+ else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+ else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
return -EINVAL;
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 is returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+ struct lpfc_rsrc_desc_pcie *desc;
+ uint32_t max_nr_virtfn;
+ uint32_t desc_count;
+ int length, rc, i;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ if (!pdev->is_physfn)
+ return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* get the maximum number of virtfn support by physfn */
+ length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+ phba->sli4_hba.iov.pf_number + 1);
+
+ get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+ bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+ LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+ if (rc != MBX_TIMEOUT) {
+ /* check return status */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (shdr_status || shdr_add_status || rc)
+ goto error_out;
+
+ } else
+ goto error_out;
+
+ desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_pcie *)
+ &get_prof_cfg->u.response.prof_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_PCIE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+ desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+ }
+
+error_out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+ lpfc_sriov_hw_max_virtfn_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing the enable or disable aer flag.
* @count: unused variable.
*
* Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
/**
* lpfc_aer_support_init - Set the initial adapters aer support flag
* @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
*
* Description:
* If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
* @count: unused variable.
*
* Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
lpfc_aer_cleanup_state);
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this attribute is written through sysfs, the driver tries to
+ * enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If no virtual functions are currently enabled on the physical
+ * function, the driver invokes the pci enable virtual function api to
+ * enable them. If the nr_vfn provided is within the supported maximum,
+ * it is used when invoking the api; otherwise the request is rejected.
+ * If the api call succeeds, the number of virtual functions enabled is
+ * recorded in the driver's cfg_sriov_nr_virtfn; otherwise an error is
+ * returned and cfg_sriov_nr_virtfn remains zero.
+ *
+ * If a non-zero number of virtual functions is already enabled on the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * an error is returned and the driver does nothing.
+ *
+ * If the nr_vfn provided is zero and virtual functions are currently
+ * enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api is invoked to disable all of them and
+ * cfg_sriov_nr_virtfn is set to zero. Otherwise, if no virtual
+ * functions are enabled, nothing is done.
+ *
+ * Returns:
+ * length of the buf on success if val is in range and the intended mode
+ * is supported.
+ * -EINVAL if val is out of range or the intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
+
+ /* Request disabling virtual functions */
+ if (val == 0) {
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ return strlen(buf);
+ }
+
+ /* Request enabling virtual functions */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3018 There are %d virtual functions "
+ "enabled on physical function.\n",
+ phba->cfg_sriov_nr_virtfn);
+ return -EEXIST;
+ }
+
+ if (val <= LPFC_MAX_VFN_PER_PFN)
+ phba->cfg_sriov_nr_virtfn = val;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3019 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+ }
+
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ phba->cfg_sriov_nr_virtfn = 0;
+ rc = -EPERM;
+ } else
+ rc = strlen(buf);
+
+ return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of sr-iov virtual functions to enable.
+ *
+ * Description:
+ * If val is in the valid range [0, LPFC_MAX_VFN_PER_PFN], then set the
+ * adapter's initial cfg_sriov_nr_virtfn field; otherwise -EINVAL is
+ * returned and the field is left unchanged. It will be up to the driver's
+ * probe_one routine to determine whether the device's SR-IOV is supported
+ * or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL if val is out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+ phba->cfg_sriov_nr_virtfn = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3017 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+ lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
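
For reference, a minimal userspace sketch of exercising the new attribute once the driver is loaded; the /sys/class/scsi_host/hostN path and the host number are assumptions, not taken from this patch, and the same value can alternatively be given at load time via the lpfc_sriov_nr_virtfn module parameter.

#include <fcntl.h>
#include <unistd.h>

/* Illustrative only: request two SR-IOV virtual functions on host0. */
static int set_lpfc_nr_virtfn(void)
{
	const char *attr = "/sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn";
	int fd = open(attr, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "2", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
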
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
+ &dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
+ &dev_attr_lpfc_sriov_hw_max_virtfn,
NULL,
};
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
- .size = MAILBOX_CMD_SIZE,
+ .size = MAILBOX_SYSFS_MAX,
.read = sysfs_mbox_read,
.write = sysfs_mbox_write,
};
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
+ lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..7fb0ba4cbfa7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
- struct lpfc_dmabuf *rxbmp; /* for BIU diags */
- struct lpfc_dmabufext *dmp; /* for BIU diags */
+ struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
uint8_t *ext; /* extended mailbox data */
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
}
icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
} else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
}
/**
- * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * lpfc_bsg_diag_mode_enter - prepare for entering device diag loopback mode
+ * @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
- * This function is responsible for placing a port into diagnostic loopback
- * mode in order to perform a diagnostic loopback test.
+ * This function is responsible for preparing the driver for diag loopback
+ * on the device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i = 0;
+
+ psli = &phba->sli;
+ if (!psli)
+ return -ENODEV;
+
+ pring = &psli->ring[LPFC_FCP_RING];
+ if (!pring)
+ return -ENODEV;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ return -EACCES;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ }
+
+ while (pring->txcmplq_cnt) {
+ if (i++ > 500) /* wait up to 5 seconds */
+ break;
+ msleep(10);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for the driver's exit processing after
+ * setting up diag loopback mode on the device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ }
+ return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
* All new scsi requests are blocked, a small delay is used to allow the
* scsi requests to complete then the link is brought down. If the link is
* is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
* All of this is done in-line.
*/
static int
-lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
- struct Scsi_Host *shost = job->shost;
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
struct diag_mode_set *loopback_mode;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
uint32_t link_flags;
uint32_t timeout;
- struct lpfc_vport **vports;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus;
int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
- if (job->request_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2738 Received DIAG MODE request below minimum "
- "size\n");
+ "2738 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
rc = -EINVAL;
goto job_error;
}
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
- if ((phba->link_state == LPFC_HBA_ERROR) ||
- (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
- rc = -EACCES;
- goto job_error;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
- goto job_error;
- }
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_block_requests(shost);
- }
-
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_block_requests(shost);
+ goto loopback_mode_exit;
}
-
- while (pring->txcmplq_cnt) {
- if (i++ > 500) /* wait up to 5 seconds */
- break;
-
- msleep(10);
- }
-
memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
rc = -ENODEV;
loopback_mode_exit:
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_unblock_requests(shost);
+ lpfc_bsg_diag_mode_exit(phba);
+
+ /*
+ * Let SLI layer release mboxq if mbox command completed after timeout.
+ */
+ if (mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set the link to diag or normal operation state.
+ *
+ * This function is responsible for issuing a sli4 mailbox command to set the
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_mbx_set_link_diag_state *link_diag_state;
+ uint32_t req_len, alloc_len;
+ int mbxstatus = MBX_SUCCESS, rc;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_state_set_out;
+ }
+ link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+ bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.type);
+ if (diag)
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 1);
+ else
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 0);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+ rc = 0;
+ else
+ rc = -ENODEV;
+
+link_diag_state_set_out:
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct diag_mode_set *loopback_mode;
+ uint32_t link_flags, timeout, req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ int mbxstatus, i, rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3011 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+ if (rc)
+ goto loopback_mode_exit;
+
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ goto loopback_mode_exit;
+ }
+ msleep(10);
+ }
+ /* set up loopback mode */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
+ if (link_flags == INTERNAL_LOOP_BACK)
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+ else
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+ rc = -ENODEV;
+ else {
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ /* wait for the link attention interrupt */
+ msleep(100);
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ msleep(10);
}
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_unblock_requests(shost);
}
+loopback_mode_exit:
+ lpfc_bsg_diag_mode_exit(phba);
+
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1622,6 +1846,234 @@ job_error:
}
/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)
+ rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+ else
+ rc = -ENODEV;
+
+ return rc;
+
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return -ENODEV;
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (!rc)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs the SLI4 diag link test as requested by the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmboxq;
+ struct sli4_link_diag *link_diag_test_cmd;
+ uint32_t req_len, alloc_len;
+ uint32_t timeout;
+ struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct diag_status *diag_status_reply;
+ int mbxstatus, rc = 0;
+
+ shost = job->shost;
+ if (!shost) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ phba = vport->phba;
+ if (!phba) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3013 Received LINK DIAG TEST request "
+ " size:%d below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ link_diag_test_cmd = (struct sli4_link_diag *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = link_diag_test_cmd->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+ if (rc)
+ goto job_error;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+ run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+ bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.type);
+ bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_id);
+ bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+ link_diag_test_cmd->loops);
+ bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_version);
+ bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+ link_diag_test_cmd->error_action);
+
+ mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || mbxstatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3010 Run link diag test mailbox failed with "
+ "mbx_status x%x status x%x, add_status x%x\n",
+ mbxstatus, shdr_status, shdr_add_status);
+ }
+
+ diag_status_reply = (struct diag_status *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3012 Received Run link diag test reply "
+ "below minimum size (%d): reply_len:%d\n",
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_status)),
+ job->reply_len);
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ diag_status_reply->mbox_status = mbxstatus;
+ diag_status_reply->shdr_status = shdr_status;
+ diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
* lpfcdiag_loop_self_reg - obtains a remote port login id
* @phba: Pointer to HBA context object
* @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
}
/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
+ * returns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf;
+ struct pci_dev *pcidev = phba->pcidev;
+
+ /* allocate dma buffer struct */
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ INIT_LIST_HEAD(&dmabuf->list);
+
+ /* now, allocate dma buffer */
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
+
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return NULL;
+ }
+ memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+ return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine frees a dma buffer and its associated buffer
+ * descriptor referred by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+
+ if (!dmabuf)
+ return;
+
+ if (dmabuf->virt)
+ dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine frees all dma buffers and their associated buffer
+ * descriptors referred by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+ struct list_head *dmabuf_list)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+ if (list_empty(dmabuf_list))
+ return;
+
+ list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+ list_del_init(&dmabuf->list);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ }
+ return;
+}
+
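
A minimal sketch of how the three dma page helpers above compose; the wrapper function name below is hypothetical and the error handling is illustrative only.

/* Illustrative only: allocate one BSG mbox page, use it, then free it. */
static int lpfc_bsg_dma_page_example(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf;

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * dmabuf->virt is a zeroed BSG_MBOX_SIZE buffer and dmabuf->phys is
	 * its dma address; either free it directly as done here, or queue it
	 * on a list and release the whole list later with
	 * lpfc_bsg_dma_page_list_free().
	 */
	lpfc_bsg_dma_page_free(phba, dmabuf);
	return 0;
}
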
+/**
* diag_cmd_data_alloc - fills in a bde struct with dma buffers
* @phba: Pointer to HBA context object
* @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
}
/**
- * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
* @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
*
* This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_test(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
}
/**
- * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
@@ -2422,15 +2954,13 @@ job_error:
* of the mailbox.
**/
void
-lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
- struct lpfc_mbx_nembed_cmd *nembed_sge;
uint32_t size;
unsigned long flags;
- uint8_t *to;
- uint8_t *from;
+ uint8_t *pmb, *pmb_buf;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
- /* build the outgoing buffer to do an sg copy
- * the format is the response mailbox followed by any extended
- * mailbox data
+ /*
+ * The outgoing buffer is referenced directly from the dma buffer;
+ * we just need to get the header part from the mailboxq structure.
*/
- from = (uint8_t *)&pmboxq->u.mb;
- to = (uint8_t *)dd_data->context_un.mbox.mb;
- memcpy(to, from, sizeof(MAILBOX_t));
- if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
- /* copy the extended data if any, count is in words */
- if (dd_data->context_un.mbox.outExtWLen) {
- from = (uint8_t *)dd_data->context_un.mbox.ext;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.outExtWLen *
- sizeof(uint32_t);
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
- from = (uint8_t *)dd_data->context_un.mbox.
- dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = pmboxq->u.mb.un.varWords[5];
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmboxq->u.mb.un.varWords[0];
-
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = nembed_sge->sge[0].length;
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
- from = (uint8_t *)dd_data->context_un.
- mbox.dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- }
- }
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
- from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- from, size);
- job->reply->result = 0;
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
/* need to hold the lock until we set job->dd_data to NULL
* to hold off the timeout handler returning to the mid-layer
* while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
job->dd_data = NULL;
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
} else {
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
- kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(dd_data->context_un.mbox.ext);
- if (dd_data->context_un.mbox.dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dd_data->context_un.mbox.dmp->size,
- dd_data->context_un.mbox.dmp->dma.virt,
- dd_data->context_un.mbox.dmp->dma.phys);
- kfree(dd_data->context_un.mbox.dmp);
- }
- if (dd_data->context_un.mbox.rxbmp) {
- lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
- dd_data->context_un.mbox.rxbmp->phys);
- kfree(dd_data->context_un.mbox.rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
kfree(dd_data);
+
+ if (job) {
+ job->reply->result = 0;
+ job->job_done(job);
+ }
return;
}
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer
+ * mbox command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+ return;
+
+ /* free all memory, including dma buffers */
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+ /* multi-buffer mailbox command pass-through session complete */
+ memset((char *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles the BSG job for mailbox command completions with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ uint8_t *pmb, *pmb_buf;
+ unsigned long flags;
+ uint32_t size;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = pmboxq->context1;
+ /* has the job already timed out? */
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job = NULL;
+ goto job_done_out;
+ }
+
+ /*
+ * The outgoing buffer is referenced directly from the dma buffer;
+ * we just need to get the header part from the mailboxq structure.
+ */
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+ job = dd_data->context_un.mbox.set_job;
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
+ /* result for success */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ /* need to hold the lock until we set job->dd_data to NULL
+ * to hold off the timeout handler in the mid-layer from
+ * taking any action.
+ */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2937 SLI_CONFIG ext-buffer maibox command "
+ "(x%x/x%x) complete bsg job done, bsize:%d\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, size);
+ } else
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+job_done_out:
+ if (!job)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2938 SLI_CONFIG ext-buffer maibox "
+ "command (x%x/x%x) failure, rc:x%x\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, rc);
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+ kfree(dd_data);
+
+ return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler function for mailbox read commands with
+ * multiple external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* free base driver mailbox structure memory */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler function for mailbox write commands with
+ * multiple external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with the mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ /* free all memory, including dma buffers */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
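+/**
+ * lpfc_bsg_sli_cfg_dma_desc_setup - set up a SLI_CONFIG external buffer desc
+ * @phba: Pointer to HBA context object.
+ * @nemb_tp: Enumerated type of non-embedded mailbox command (mse or hbd).
+ * @index: Index of the external buffer descriptor to set up.
+ * @mbx_dmabuf: Pointer to the dma buffer holding the SLI_CONFIG mailbox.
+ * @ext_dmabuf: Pointer to the dma buffer backing this external buffer.
+ *
+ * This routine fills in the physical address of the external buffer
+ * descriptor at @index in the SLI_CONFIG mailbox held in @mbx_dmabuf. For
+ * index 0 the data area immediately follows the mailbox header within
+ * @mbx_dmabuf itself; for any other index the address of @ext_dmabuf is
+ * used.
+ **/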
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+ struct lpfc_dmabuf *ext_dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2943 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ } else {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2944 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ }
+ } else {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3007 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+
+ } else {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3008 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+ }
+ }
+ return;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerated type of non-embedded mailbox command.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ uint32_t ext_buf_cnt, ext_buf_index;
+ struct lpfc_dmabuf *ext_dmabuf = NULL;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *pmbx;
+ int rc, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2945 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2941 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2946 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2942 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ /* reject non-embedded mailbox command with no external buffer */
+ if (ext_buf_cnt == 0) {
+ rc = -EPERM;
+ goto job_error;
+ } else if (ext_buf_cnt > 1) {
+ /* additional external read buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ }
+ }
+
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* for the rest of external buffer descriptors if any */
+ if (ext_buf_cnt > 1) {
+ ext_buf_index = 1;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+ ext_buf_index, dmabuf,
+ curr_dmabuf);
+ ext_buf_index++;
+ }
+ }
+
+ /* construct base driver mbox command */
+ pmb = &pmboxq->u.mb;
+ pmbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, pmbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ /* callback for multi-buffer read mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2947 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2948 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ kfree(dd_data);
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerated type of non-embedded mailbox command.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t ext_buf_cnt;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *mbx;
+ int rc = 0, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2953 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2949 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2954 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2950 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ if (ext_buf_cnt == 0)
+ return -EPERM;
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* log the lengths of the additional external buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ if (nemb_tp == nemb_mse)
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+ i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[i].buf_len);
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+ i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[i]));
+ }
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ if (ext_buf_cnt == 1) {
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb = &pmboxq->u.mb;
+ mbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, mbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2955 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2956 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ }
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t subsys;
+ uint32_t opcode;
+ int rc = SLI_CONFIG_NOT_HANDLED;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+ switch (opcode) {
+ case FCOE_OPCODE_READ_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2957 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ case FCOE_OPCODE_ADD_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2958 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2959 Not handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2977 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ } else {
+ subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ case COMN_OPCODE_READ_OBJECT_LIST:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2960 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ case COMN_OPCODE_WRITE_OBJECT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2961 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2962 Not handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2978 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine is for requesting to abort a pass-through mailbox command with
+ * multiple external buffers due to error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ else
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ *
+ * This routine extracts the next mailbox read external buffer back to
+ * user space through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_dmabuf *dmabuf;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2963 SLI_CONFIG (mse) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ } else {
+ size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2964 SLI_CONFIG (hbd) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ }
+ if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+ return -EPIPE;
+ dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ struct lpfc_dmabuf, list);
+ list_del_init(&dmabuf->list);
+ pbuf = (uint8_t *)dmabuf->virt;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pbuf, size);
+
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+ "command session done\n");
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ }
+
+ job->reply->result = 0;
+ job->job_done(job);
+
+ return SLI_CONFIG_HANDLED;
+}
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ enum nemb_type nemb_tp;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+ int rc;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+ nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ pbuf = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pbuf, size);
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2966 SLI_CONFIG (mse) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2967 SLI_CONFIG (hbd) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ }
+
+ /* set up external buffer descriptor and add to external buffer list */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf,
+ dmabuf);
+ list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2968 SLI_CONFIG ext-buffer wr all %d "
+ "ebuffers received\n",
+ phba->mbox_ext_buf_ctx.numBuf);
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, pbuf, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2969 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2970 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ int rc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2971 SLI_CONFIG buffer (type:x%x)\n",
+ phba->mbox_ext_buf_ctx.mboxType);
+
+ if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2972 SLI_CONFIG rd buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_read_ebuf_get(phba, job);
+ if (rc == SLI_CONFIG_HANDLED)
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2973 SLI_CONFIG wr buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+ }
+ return rc;
+}
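The read and write paths above both gate an incoming external buffer on the current session state, LPFC_BSG_MBOX_DONE for reads and LPFC_BSG_MBOX_HOST for writes, and abort the session on any mismatch. A minimal stand-alone sketch of that check, using a reduced enum with the state names seen in this patch (the enum values, helper name and comments are illustrative, not the driver's own definitions):

/* Illustrative model of the ext-buffer session states used above:
 * IDLE (no session), HOST (collecting write buffers), PORT (command
 * handed to the port), DONE (completed, read buffers pending) and
 * ABTS (session aborted, e.g. on a BSG timeout).
 */
enum bsg_mbox_state_sketch {
	SKETCH_MBOX_IDLE,
	SKETCH_MBOX_HOST,
	SKETCH_MBOX_PORT,
	SKETCH_MBOX_DONE,
	SKETCH_MBOX_ABTS,
};

/* Hypothetical helper: may this buffer be accepted in the current state? */
static int ebuf_state_ok(enum bsg_mbox_state_sketch state, int is_read)
{
	if (is_read)
		return state == SKETCH_MBOX_DONE; /* reads drain a finished command */
	return state == SKETCH_MBOX_HOST;         /* writes feed a pending command  */
}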
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ int rc;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* mbox command with/without single external buffer */
+ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+ return SLI_CONFIG_NOT_HANDLED;
+
+ /* mbox command and first external buffer */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+ if (mbox_req->extSeqNum == 1) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2974 SLI_CONFIG mailbox: tag:%d, "
+ "seq:%d\n", mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+ return rc;
+ } else
+ goto sli_cfg_ext_error;
+ }
+
+ /*
+ * handle additional external buffers
+ */
+
+ /* check broken pipe conditions */
+ if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+ goto sli_cfg_ext_error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2975 SLI_CONFIG mailbox external buffer: "
+ "extSta:x%x, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+ return rc;
+
+sli_cfg_ext_error:
+ /* all other cases, broken pipe */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2976 SLI_CONFIG mailbox broken pipe: "
+ "ctxSta:x%x, ctxNumBuf:%d "
+ "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state,
+ phba->mbox_ext_buf_ctx.numBuf,
+ phba->mbox_ext_buf_ctx.mbxTag,
+ phba->mbox_ext_buf_ctx.seqNum,
+ mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ return -EPIPE;
+}
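Restated, the broken-pipe checks above define a simple ordering contract for a multi-buffer session: the tag must match the session opened with the first buffer, the sequence number may not exceed the announced buffer count, and each buffer must arrive exactly one past the previous one. A hedged sketch of that predicate with plain integer inputs (function and parameter names are illustrative only):

/* Illustrative predicate mirroring the three broken-pipe checks above. */
static int sli_cfg_ebuf_in_order(unsigned int req_tag, unsigned int req_seq,
				 unsigned int ctx_tag, unsigned int ctx_num_buf,
				 unsigned int ctx_seq)
{
	if (req_tag != ctx_tag)		/* buffer belongs to a different session */
		return 0;
	if (req_seq > ctx_num_buf)	/* more buffers than were announced */
		return 0;
	if (req_seq != ctx_seq + 1)	/* out of order or duplicated buffer */
		return 0;
	return 1;			/* accept; the caller advances ctx_seq */
}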
+
+/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
* @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
- MAILBOX_t *mb = NULL;
+ uint8_t *pmbx = NULL;
struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
- uint32_t size;
- struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
- struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
- struct ulp_bde64 *rxbpl = NULL;
- struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct dfc_mbox_req *mbox_req;
struct READ_EVENT_LOG_VAR *rdEventLog;
uint32_t transmit_length, receive_length, mode;
+ struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
uint8_t *from;
+ uint32_t size;
+
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ /*
+ * Don't allow mailbox commands to be sent when blocked or when in
+ * the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
(mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!dmabuf || !dmabuf->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* Get the mailbox command or external buffer from BSG */
+ pmbx = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, pmbx, size);
+
+ /* Handle possible SLI_CONFIG with non-embedded payloads */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+ if (rc == SLI_CONFIG_HANDLED)
+ goto job_cont;
+ if (rc)
+ goto job_done;
+ /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+ }
+
+ rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+ if (rc != 0)
+ goto job_done; /* must be negative */
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
- mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
- if (!mb) {
- rc = -ENOMEM;
- goto job_done;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
- size = job->request_payload.payload_len;
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
-
- rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0)
- goto job_done; /* must be negative */
-
pmb = &pmboxq->u.mb;
- memcpy(pmb, mb, sizeof(*pmb));
+ memcpy(pmb, pmbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
"0x%x while in stopped state.\n",
pmb->mbxCommand);
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_done;
- }
-
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
- if (!ext) {
- rc = -ENOMEM;
- goto job_done;
- }
-
/* any data for the device? */
if (mbox_req->inExtWLen) {
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)ext, from,
- mbox_req->inExtWLen * sizeof(uint32_t));
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
}
-
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
- putPaddrLow(dmp->dma.phys);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
- putPaddrLow(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
-
- /* copy the transmit data found in the mailbox extension area */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
rdEventLog = &pmb->un.varRdEventLog;
receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* mode zero uses a bde like biu diags command */
if (mode == 0) {
-
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- if (rxbpl) {
- INIT_LIST_HEAD(&rxbmp->list);
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- receive_length, 0);
- }
-
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* receive length cannot be greater than mailbox
* extension size
*/
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
+ if (receive_length == 0) {
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
pmb->un.varUpdateCfg.co) {
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- bde->tus.f.bdeSize, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- bde->addrHigh = putPaddrHigh(dmp->dma.phys);
- bde->addrLow = putPaddrLow(dmp->dma.phys);
-
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- bde->tus.f.bdeSize);
+ bde->addrHigh = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ bde->addrLow = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
- header = (struct mbox_header *)&pmb->un.varWords[0];
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmb->un.varWords[0];
- receive_length = nembed_sge->sge[0].length;
-
- /* receive length cannot be greater than mailbox
- * extension size
- */
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
- rc = -ERANGE;
- goto job_done;
- }
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
+ /* Handling non-embedded SLI_CONFIG mailbox command */
+ sli4_config = &pmboxq->u.mqe.un.sli4_config;
+ if (!bf_get(lpfc_mbox_hdr_emb,
+ &sli4_config->header.cfg_mhdr)) {
+ /* rebuild the command for sli4 using our
+ * own buffers like we do for biu diags
+ */
+ header = (struct mbox_header *)
+ &pmb->un.varWords[0];
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &pmb->un.varWords[0];
+ receive_length = nembed_sge->sge[0].length;
+
+ /* receive length cannot be greater than
+ * mailbox extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
+ nembed_sge->sge[0].pa_hi =
+ putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ nembed_sge->sge[0].pa_lo =
+ putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
-
- INIT_LIST_HEAD(&dmp->dma.list);
- nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
- nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- header->cfg_mhdr.payload_length);
}
}
- dd_data->context_un.mbox.rxbmp = rxbmp;
- dd_data->context_un.mbox.dmp = dmp;
+ dd_data->context_un.mbox.dmabuffers = dmabuf;
/* setup wake call as IOCB callback */
- pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
- dd_data->context_un.mbox.mb = mb;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
dd_data->context_un.mbox.set_job = job;
dd_data->context_un.mbox.ext = ext;
dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* job finished, copy the data */
- memcpy(mb, pmb, sizeof(*pmb));
+ memcpy(pmbx, pmb, sizeof(*pmb));
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
+ job->reply_payload.sg_cnt,
+ pmbx, size);
/* not waiting mbox already done */
rc = 0;
goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
job_done:
/* common exit for error or job completed inline */
- kfree(mb);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- kfree(ext);
- if (dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dmp->size, dmp->dma.virt,
- dmp->dma.phys);
- kfree(dmp);
- }
- if (rxbmp) {
- lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
- kfree(rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
+job_cont:
return rc;
}
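The SLI4_CONFIG branch above decides between embedded and non-embedded handling with bf_get(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr). Fields in these headers are consistently described by _SHIFT/_MASK/_WORD triplets (see the definitions added to lpfc_bsg.h and lpfc_hw4.h later in this patch), and an accessor in that style can be sketched generically as below; the _sketch suffix marks this as a reconstruction of the pattern, not the driver's own helpers:

/* Generic sketch of the _SHIFT/_MASK/_WORD bit-field accessor pattern. */
#define bf_get_sketch(name, ptr) \
	((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

#define bf_set_sketch(name, ptr, value) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((value) & name##_MASK) << name##_SHIFT)))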
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct dfc_mbox_req *mbox_req;
int rc = 0;
- /* in case no data is transferred */
+ /* mix-and-match backward compatibility */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2737 Received MBOX_REQ request below "
- "minimum size\n");
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_error;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2737 Mix-and-match backward compability "
+ "between MBOX_REQ old size:%d and "
+ "new request size:%d\n",
+ (int)(job->request_len -
+ sizeof(struct fc_bsg_request)),
+ (int)sizeof(struct dfc_mbox_req));
+ mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ mbox_req->extMboxTag = 0;
+ mbox_req->extSeqNum = 0;
}
rc = lpfc_bsg_issue_mbox(phba, job, vport);
-job_error:
if (rc == 0) {
/* job done */
job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
rc = lpfc_bsg_send_mgmt_rsp(job);
break;
case LPFC_BSG_VENDOR_DIAG_MODE:
- rc = lpfc_bsg_diag_mode(job);
+ rc = lpfc_bsg_diag_loopback_mode(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE_END:
+ rc = lpfc_sli4_bsg_diag_mode_end(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+ rc = lpfc_bsg_diag_loopback_run(job);
break;
- case LPFC_BSG_VENDOR_DIAG_TEST:
- rc = lpfc_bsg_diag_test(job);
+ case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+ rc = lpfc_sli4_bsg_link_diag_test(job);
break;
case LPFC_BSG_VENDOR_GET_MGMT_REV:
rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
/* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
break;
case TYPE_MENLO:
menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca6f5ae..c8c2b47ea886 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
* These are the vendor unique structures passed in using the bsg
* FC_BSG_HST_VENDOR message code type.
*/
-#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
-#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
-#define LPFC_BSG_VENDOR_DIAG_MODE 4
-#define LPFC_BSG_VENDOR_DIAG_TEST 5
-#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
-#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
+#define LPFC_BSG_VENDOR_DIAG_MODE 4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
+#define LPFC_BSG_VENDOR_MBOX 7
+#define LPFC_BSG_VENDOR_MENLO_CMD 8
+#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
struct set_ct_event {
uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
uint32_t timeout;
};
+struct sli4_link_diag {
+ uint32_t command;
+ uint32_t timeout;
+ uint32_t test_id;
+ uint32_t loops;
+ uint32_t test_version;
+ uint32_t error_action;
+};
+
struct diag_mode_test {
uint32_t command;
};
+struct diag_status {
+ uint32_t mbox_status;
+ uint32_t shdr_status;
+ uint32_t shdr_add_status;
+};
+
#define LPFC_WWNN_TYPE 0
#define LPFC_WWPN_TYPE 1
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
};
#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
struct dfc_mbox_req {
uint32_t command;
uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
+ uint32_t extMboxTag;
+ uint32_t extSeqNum;
};
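The two added fields turn the request header into a session descriptor for non-embedded SLI_CONFIG commands: a caller pushing, say, three external buffers keeps extMboxTag constant and walks extSeqNum from 1 to 3, while older applications (and single-buffer commands) leave both fields zero, which the driver treats as the legacy path. A sketch of such a request, with every value hypothetical:

/* Hypothetical fill-in: third buffer of a three-buffer SLI_CONFIG session. */
struct dfc_mbox_req req = {
	.command    = 0,	/* vendor sub-command, placeholder value     */
	.mbOffset   = 0,	/* offset into the mailbox region            */
	.inExtWLen  = 0,	/* no extended mailbox words for this buffer */
	.outExtWLen = 0,
	.extMboxTag = 7,	/* same tag for every buffer of the session  */
	.extSeqNum  = 3,	/* 1-based and strictly consecutive          */
};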
/* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
#define lpfc_mbox_sli_config_mse_len_WORD buf_len
};
-struct lpfc_sli_config_subcmd_hbd {
+struct lpfc_sli_config_hbd {
uint32_t buf_len;
#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
uint32_t reserved5;
};
-struct lpfc_sli_config_generic {
+struct lpfc_sli_config_emb0_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+ uint32_t padding;
+ uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT 0
+#define lpfc_emb0_subcmnd_opcode_MASK 0xff
+#define lpfc_emb0_subcmnd_opcode_WORD word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT 8
+#define lpfc_emb0_subcmnd_subsys_MASK 0xff
+#define lpfc_emb0_subcmnd_subsys_WORD word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE 0x0C
+#define FCOE_OPCODE_READ_FCF 0x08
+#define FCOE_OPCODE_ADD_FCF 0x09
};
-struct lpfc_sli_config_subcmnd {
+struct lpfc_sli_config_emb1_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
uint32_t word6;
-#define lpfc_subcmnd_opcode_SHIFT 0
-#define lpfc_subcmnd_opcode_MASK 0xff
-#define lpfc_subcmnd_opcode_WORD word6
-#define lpfc_subcmnd_subsys_SHIFT 8
-#define lpfc_subcmnd_subsys_MASK 0xff
-#define lpfc_subcmnd_subsys_WORD word6
+#define lpfc_emb1_subcmnd_opcode_SHIFT 0
+#define lpfc_emb1_subcmnd_opcode_MASK 0xff
+#define lpfc_emb1_subcmnd_opcode_WORD word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT 8
+#define lpfc_emb1_subcmnd_subsys_MASK 0xff
+#define lpfc_emb1_subcmnd_subsys_WORD word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_READ_OBJECT 0xAB
+#define COMN_OPCODE_WRITE_OBJECT 0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
+#define COMN_OPCODE_DELETE_OBJECT 0xAE
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
uint32_t rd_offset;
uint32_t obj_name[26];
uint32_t hbd_count;
-#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
- struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
+ struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
};
struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
#define lpfc_mqe_command_MASK 0x000000FF
#define lpfc_mqe_command_WORD word0
union {
- struct lpfc_sli_config_generic sli_config_generic;
- struct lpfc_sli_config_subcmnd sli_config_subcmnd;
+ struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+ struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
} un;
};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED 0
+#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f4eedb..fc20c247f36b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
int lpfc_config_port_post(struct lpfc_hba *);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+
/* externs BlockGuard */
extern char *_dump_buf_data;
extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7ff..779b88e1469d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
/* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca058603..ffe82d169b40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ fcp_qidx = 0;
+ do {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ qidx = 0;
+ do {
if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx];
goto pass_check;
}
- }
+ } while (++qidx < phba->cfg_fcp_eq_count);
goto error_out;
break;
case LPFC_IDIAG_MQ:
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8b..32a084534f3e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = vport->vpi + phba->vpi_base;
+ icmd->ulpContext = phba->vpi_ids[vport->vpi];
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
rc = -ENOMEM;
goto fail_free_dmabuf;
}
+
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
unsigned long flags;
+ int i;
+
+ /* Physical ports always use vpi 0 - translation is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
vport = phba->pport;
else
vport = lpfc_find_vport_by_vpid(phba,
- icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
+ icmd->unsli3.rcvsli3.vpi);
}
+
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
/* Set the ulpContext to the vpi */
- elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+ elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
} else {
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5e2038..18d0dbfda2bc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
- lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+ lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
mb->vport = vport;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(vport->fc_nodename));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(vport->fc_portname));
+ lpfc_update_vport_wwn(vport);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
-
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
+ uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+ /* SLI4 ports require the physical rpi value. */
+ rpi = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+ lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+ lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- /*
- * For SLI3, cmpl_reg_vpi will set port_state to READY, and
- * continue discovery.
- */
+ /* Register the VPI for SLI3, NON-NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
if (phba->sli_rev < LPFC_SLI_REV4) {
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_reg_vpi(phba, vport);
- else { /* NPIV Not enabled */
+ else {
lpfc_issue_clear_la(phba, vport);
vport->port_state = LPFC_VPORT_READY;
}
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
+ /*
+ * If the CVL_RCVD bit is not set then we have sent the
+ * flogi.
+ * If dev_loss fires while we are waiting, we do not want to
+ * unreg the fcf.
+ */
+ if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+ spin_unlock_irq(shost->host_lock);
+ ret = 1;
+ goto out;
+ }
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e6686a..9059524cf225 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
#define SLI3_IOCB_CMD_SIZE 128
#define SLI3_IOCB_RSP_SIZE 64
+#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
#define rrq_rxid_WORD rrq_exchg
};
+#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation*/
struct RTV_RSP { /* Structure is in Big Endian format */
uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
#define PCI_DEVICE_ID_BALIUS 0xe131
#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_LANCER_FC 0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
#define MAILBOX_HBA_EXT_OFFSET 0x100
/* max mbox xmit size is a page size for sysfs IO operations */
-#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
+#define MAILBOX_SYSFS_MAX 4096
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668ebdad..11e26a26b5d1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
#define LPFC_PCI_FUNC3 3
#define LPFC_PCI_FUNC4 4
+/* SLI4 interface type-2 control register offsets */
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
+
+/* Some SLI4 interface type-2 PDEV_CTL register bits */
+#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
+#define LPFC_CTL_PDEV_CTL_DD 0x00000004
+#define LPFC_CTL_PDEV_CTL_LC 0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+
+#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
/* Active interrupt test count */
#define LPFC_ACT_INTR_CNT 4
@@ -210,9 +229,26 @@ struct ulp_bde64 {
struct lpfc_sli4_flags {
uint32_t word0;
-#define lpfc_fip_flag_SHIFT 0
-#define lpfc_fip_flag_MASK 0x00000001
-#define lpfc_fip_flag_WORD word0
+#define lpfc_idx_rsrc_rdy_SHIFT 0
+#define lpfc_idx_rsrc_rdy_MASK 0x00000001
+#define lpfc_idx_rsrc_rdy_WORD word0
+#define LPFC_IDX_RSRC_RDY 1
+#define lpfc_xri_rsrc_rdy_SHIFT 1
+#define lpfc_xri_rsrc_rdy_MASK 0x00000001
+#define lpfc_xri_rsrc_rdy_WORD word0
+#define LPFC_XRI_RSRC_RDY 1
+#define lpfc_rpi_rsrc_rdy_SHIFT 2
+#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD word0
+#define LPFC_RPI_RSRC_RDY 1
+#define lpfc_vpi_rsrc_rdy_SHIFT 3
+#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD word0
+#define LPFC_VPI_RSRC_RDY 1
+#define lpfc_vfi_rsrc_rdy_SHIFT 4
+#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD word0
+#define LPFC_VFI_RSRC_RDY 1
};
struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_version_SHIFT 0
#define lpfc_mbox_hdr_version_MASK 0x000000FF
#define lpfc_mbox_hdr_version_WORD word9
+#define lpfc_mbox_hdr_pf_num_SHIFT 16
+#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD word9
+#define lpfc_mbox_hdr_vh_num_SHIFT 24
+#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD word9
#define LPFC_Q_CREATE_VERSION_2 2
#define LPFC_Q_CREATE_VERSION_1 1
#define LPFC_Q_CREATE_VERSION_0 0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
} response;
};
-/* Mailbox structures */
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
struct mbox_header {
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
union lpfc_sli4_cfg_shdr cfg_shdr;
};
+#define LPFC_EXTENT_LOCAL 0
+#define LPFC_TIMEOUT_DEFAULT 0
+#define LPFC_EXTENT_VERSION_DEFAULT 0
+
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -794,6 +846,13 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
/* Mailbox command structures */
struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
} u;
};
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI 0x20
+#define LPFC_RSC_TYPE_FCOE_VPI 0x21
+#define LPFC_RSC_TYPE_FCOE_RPI 0x22
+#define LPFC_RSC_TYPE_FCOE_XRI 0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
+ } rsp;
+ } u;
+};
+
+struct lpfc_id_range {
+ uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
+#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
+#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT 0
+#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_WORD word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
+#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
+#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD word0
+ uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT 16
+#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
+#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD word2
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically sized to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT 0
+#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD word4
+ struct lpfc_id_range id[53];
+ } rsp;
+ } u;
+};
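The sizing comment above boils down to a few constants; a compile-time restatement (names invented for this sketch) makes the 53-word bound on lpfc_id_range explicit:

/* Sketch of the sizing arithmetic from the comment above. */
enum {
	MBOX_BYTES_SKETCH      = 256,			/* generic mailbox size      */
	HDR_BYTES_SKETCH       = (6 + 4 + 1) * 4,	/* 11 header words = 44      */
	PAYLOAD_BYTES_SKETCH   = MBOX_BYTES_SKETCH - HDR_BYTES_SKETCH,	/* 212 bytes */
	EXTENT_ID_BYTES_SKETCH = 2,			/* each extent id is 16 bits */
	MAX_EXTENT_IDS_SKETCH  = PAYLOAD_BYTES_SKETCH / EXTENT_ID_BYTES_SKETCH, /* 106 */
	ID_RANGE_WORDS_SKETCH  = MAX_EXTENT_IDS_SKETCH / 2,	/* 53 words, hence id[53] */
};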
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
+ * structure shares the same SHIFT/MASK/WORD defines provided in the
+ * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents, word4, provided in
+ * the structures defined above. This non-embedded structure provides for the
+ * maximum number of extents supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word4;
+ struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+ struct mbox_header header;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
+ } req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
struct lpfc_mbx_post_hdr_tmpl {
struct mbox_header header;
uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
uint32_t word2;
#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
+#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
#define lpfc_sli4_sge_offset_WORD word2
#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
struct lpfc_mbx_read_config {
uint32_t word1;
-#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
-#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_bbc_WORD word1
-#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
-#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_init_bbc_WORD word1
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
uint32_t word2;
-#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
-#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
-#define lpfc_mbx_rd_conf_nport_did_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
- uint32_t word3;
-#define lpfc_mbx_rd_conf_ao_SHIFT 0
-#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
-#define lpfc_mbx_rd_conf_ao_WORD word3
-#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
-#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_bb_scn_WORD word3
-#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
-#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
-#define lpfc_mbx_rd_conf_mc_SHIFT 29
-#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
-#define lpfc_mbx_rd_conf_mc_WORD word3
+ uint32_t rsvd_3;
uint32_t word4;
#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
- uint32_t word5;
-#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_lp_tov_WORD word5
+ uint32_t rsvd_5;
uint32_t word6;
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
- uint32_t word7;
-#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
- uint32_t word8;
-#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_al_tov_WORD word8
+ uint32_t rsvd_7;
+ uint32_t rsvd_8;
uint32_t word9;
#define lpfc_mbx_rd_conf_lmt_SHIFT 0
#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_lmt_WORD word9
- uint32_t word10;
-#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
-#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_alpa_WORD word10
- uint32_t word11_rsvd;
+ uint32_t rsvd_10;
+ uint32_t rsvd_11;
uint32_t word12;
#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_vfi_count_WORD word15
uint32_t word16;
-#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
-#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
#define cfg_fcoe_SHIFT 0
#define cfg_fcoe_MASK 0x00000001
#define cfg_fcoe_WORD word12
+#define cfg_ext_SHIFT 1
+#define cfg_ext_MASK 0x00000001
+#define cfg_ext_WORD word12
+#define cfg_hdrr_SHIFT 2
+#define cfg_hdrr_MASK 0x00000001
+#define cfg_hdrr_WORD word12
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
struct lpfc_sli4_parameters sli4_parameters;
};
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE 18
+ uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+ uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT 0
+#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
+ uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
+ uint32_t reserved;
+ uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+ uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
+ uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
+ uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
+ uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
+ uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+ uint32_t word10;
+ uint32_t word11;
+ uint32_t word12;
+ uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
+ } request;
+ struct {
+ struct lpfc_prof_cfg prof_cfg;
+ } response;
+ } u;
+};
+
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+struct lpfc_mbx_wr_object {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT 31
+#define lpfc_wr_object_eof_MASK 0x00000001
+#define lpfc_wr_object_eof_WORD word4
+#define lpfc_wr_object_write_length_SHIFT 0
+#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD word4
+ uint32_t write_offset;
+ uint32_t object_name[26];
+ uint32_t bde_count;
+ struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+ } request;
+ struct {
+ uint32_t actual_write_length;
+ } response;
+ } u;
+};
+
/* mailbox queue entry structure */
struct lpfc_mqe {
uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
struct lpfc_mbx_cq_destroy cq_destroy;
struct lpfc_mbx_wq_destroy wq_destroy;
struct lpfc_mbx_rq_destroy rq_destroy;
+ struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+ struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+ struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
struct lpfc_mbx_post_sgl_pages post_sgl_pages;
struct lpfc_mbx_nembed_cmd nembed_cmd;
struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_set_link_diag_state link_diag_state;
+ struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+ struct lpfc_mbx_run_link_diag_test link_diag_test;
+ struct lpfc_mbx_get_func_cfg get_func_cfg;
+ struct lpfc_mbx_get_prof_cfg get_prof_cfg;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_wr_object wr_object;
} un;
};
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
#define SGL_ALIGN_SZ 64
#define SGL_PAGE_SIZE 4096
/* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI ((uint16_t)-1)
+#define NO_XRI 0xffff
struct wqe_common {
uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
struct gen_req64_wqe gen_req;
};
+#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP 0xf7
+#define LPFC_FILE_ID_GROUP 0xa2
+struct lpfc_grp_hdr {
+ uint32_t size;
+ uint32_t magic_number;
+ uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT 24
+#define lpfc_grp_hdr_file_type_MASK 0x000000FF
+#define lpfc_grp_hdr_file_type_WORD word2
+#define lpfc_grp_hdr_id_SHIFT 16
+#define lpfc_grp_hdr_id_MASK 0x000000FF
+#define lpfc_grp_hdr_id_WORD word2
+ uint8_t rev_name[128];
+};
+
#define FCP_COMMAND 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
+#define LPFC_FW_DUMP 1
+#define LPFC_FW_RESET 2
+#define LPFC_DV_RESET 3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af3..148b98ddbb1d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
if (!lpfc_vpd_data)
goto out_free_mbox;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
}
/**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ * cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * Return codes
+ * None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+ /* If the soft name exists then update it using the service params */
+ if (vport->phba->cfg_soft_wwnn)
+ u64_to_wwn(vport->phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
+ if (vport->phba->cfg_soft_wwpn)
+ u64_to_wwn(vport->phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+
+ /*
+ * If the name is empty or there exists a soft name
+ * then copy the service params name, otherwise use the fc name
+ */
+ if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name));
+
+ if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+}
+
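lpfc_update_vport_wwn() leans on the FC transport's u64_to_wwn()/wwn_to_u64() helpers to move between the 64-bit soft-WWN configuration values and the 8-byte name arrays kept in the service parameters. A small stand-alone illustration of that round trip (the value is arbitrary):

	/* Round-tripping a WWN between u64 and byte-array form. */
	u64 soft_wwnn = 0x20000000c9abcdefULL;	/* arbitrary example */
	u8 wwn[8];

	u64_to_wwn(soft_wwnn, wwn);		/* fills wwn[0..7], MSB first */
	WARN_ON(wwn_to_u64(wwn) != soft_wwnn);	/* must round-trip exactly */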
+/**
* lpfc_config_port_post - Perform lpfc initialization after config port
* @phba: pointer to lpfc hba data structure.
*
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->context1 = NULL;
-
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
-
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
- if (phba->lmt & LMT_10Gb)
+ if (phba->lmt & LMT_16Gb)
+ max_speed = 16;
+ else if (phba->lmt & LMT_10Gb)
max_speed = 10;
else if (phba->lmt & LMT_8Gb)
max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
"Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FC:
- oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
+ case PCI_DEVICE_ID_LANCER_FC_VF:
+ m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
+ m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
break;
default:
m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
if (mdp && mdp[0] == '\0')
snprintf(mdp, 79,"%s", m.name);
- /* oneConnect hba requires special processing, they are all initiators
+ /*
+ * oneConnect HBAs require special processing, they are all initiators
* and we put the port number on the end
*/
if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
kfree(io);
phba->total_iocbq_bufs--;
}
+
spin_unlock_irq(&phba->hbalock);
return 0;
}
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
vport = lpfc_find_vport_by_vpid(phba,
acqe_fip->index - phba->vpi_base);
ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_try_set_mwi(pdev);
pci_save_state(pdev);
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ pdev->needs_freset = 1;
+
return 0;
out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables PCI SR-IOV virtual functions for a physical
+ * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
+ * enable that number of virtual functions on the physical function. Since
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call is not treated as an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ int rc;
+
+ rc = pci_enable_sriov(pdev, nr_vfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2806 Failed to enable sriov on this device "
+ "with vfn number nr_vf:%d, rc:%d\n",
+ nr_vfn, rc);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2807 Successful enable sriov on this device "
+ "with vfn number nr_vf:%d\n", nr_vfn);
+ return rc;
+}
+
+/**
* lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
* @phba: pointer to lpfc hba data structure.
*
@@ -4011,6 +4079,7 @@ static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
+ int rc;
/*
* Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2808 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return 0;
}
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fcf.redisc_wait.data = (unsigned long)phba;
/*
+ * Control structure for handling external multi-buffer mailbox
+ * command pass-through.
+ */
+ memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ /*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
* MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
/*
- * Initialize dirver internal slow-path work queues
+ * Initialize driver internal slow-path work queues
*/
/* Driver internal slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Receive queue CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+ /* Initialize extent block lists. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+ INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
/* Initialize the driver internal SLI layer lists. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
/*
* Get sli4 parameters that override parameters from Port capabilities.
- * If this call fails it is not a critical error so continue loading.
+ * If this call fails, it isn't critical unless the SLI4 parameters come
+ * back in conflict.
*/
- lpfc_get_sli4_parameters(phba, mboxq);
+ rc = lpfc_get_sli4_parameters(phba, mboxq);
+ if (rc) {
+ if (phba->sli4_hba.extents_in_use &&
+ phba->sli4_hba.rpi_hdrs_in_use) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
+ goto out_free_bsmbx;
+ }
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
"1430 Failed to initialize sgl list.\n");
goto out_free_sgl_list;
}
-
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2759 Failed allocate memory for FCF round "
"robin failover bmask\n");
+ rc = -ENOMEM;
goto out_remove_rpi_hdrs;
}
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for fast-path "
"per-EQ handle array\n");
+ rc = -ENOMEM;
goto out_free_fcf_rr_bmask;
}
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
"interrupt vector entries\n");
+ rc = -ENOMEM;
goto out_free_fcp_eq_hdl;
}
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3020 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return rc;
out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
lpfc_sli4_cq_event_release_all(phba);
lpfc_sli4_cq_event_pool_destroy(phba);
+ /* Release resource identifiers. */
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+
/* Free the bsmbx region. */
lpfc_destroy_bootstrap_mbox(phba);
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
"Unloading driver.\n", __func__);
goto out_free_iocbq;
}
+ iocbq_entry->sli4_lxritag = NO_XRI;
iocbq_entry->sli4_xritag = NO_XRI;
spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2400 lpfc_init_sgl_list els %d.\n",
+ "2400 ELS XRI count %d.\n",
els_xri_cnt);
/* Initialize and populate the sglq list per host/VF. */
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max =
phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
phba->sli4_hba.scsi_xri_cnt = 0;
-
phba->sli4_hba.lpfc_scsi_psb_array =
kzalloc((sizeof(struct lpfc_scsi_buf *) *
phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
goto out_free_mem;
}
- sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (sglq_entry->sli4_xritag == NO_XRI) {
- kfree(sglq_entry);
- printk(KERN_ERR "%s: failed to allocate XRI.\n"
- "Unloading driver.\n", __func__);
- goto out_free_mem;
- }
sglq_entry->buff_type = GEN_BUFF_TYPE;
sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
int rc = 0;
- int longs;
- uint16_t rpi_count;
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-
/*
- * Provision an rpi bitmask range for discovery. The total count
- * is the difference between max and base + 1.
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
*/
- rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
-
- longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
- GFP_KERNEL);
- if (!phba->sli4_hba.rpi_bmask)
- return -ENOMEM;
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ return rc;
+ }
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
uint32_t rpi_count;
+ /*
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return NULL;
+ if (phba->sli4_hba.extents_in_use)
+ return NULL;
+
+ /* The limit on the logical index is just the max_rpi count. */
rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
spin_lock_irq(&phba->hbalock);
- curr_rpi_range = phba->sli4_hba.next_rpi;
+ /*
+ * Establish the starting RPI in this header block. The starting
+ * rpi is normalized to a zero base because the physical rpi is
+ * port based.
+ */
+ curr_rpi_range = phba->sli4_hba.next_rpi -
+ phba->sli4_hba.max_cfg_param.rpi_base;
spin_unlock_irq(&phba->hbalock);
/*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
else
rpi_count = LPFC_RPI_HDR_COUNT;
+ if (!rpi_count)
+ return NULL;
/*
* First allocate the protocol header region for the port. The
* port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
rpi_hdr->page_count = 1;
spin_lock_irq(&phba->hbalock);
- rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+
+ /* The rpi_hdr stores the logical index only. */
+ rpi_hdr->start_rpi = curr_rpi_range;
list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
/*
- * The next_rpi stores the next module-64 rpi value to post
- * in any subsequent rpi memory region postings.
+ * The next_rpi stores the next logical module-64 rpi value used
+ * to post physical rpis in subsequent rpi postings.
*/
phba->sli4_hba.next_rpi += rpi_count;
spin_unlock_irq(&phba->hbalock);
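With this change the rpi header machinery tracks only zero-based logical indices; the port-assigned physical rpi is recovered through the sli4_hba.rpi_ids[] table that is filled in once the extent (or base/count) information is known. Sketched as a hypothetical helper (the driver indexes rpi_ids[] inline rather than through a wrapper like this):

	/* Hypothetical wrapper: logical rpi index -> physical (port) rpi. */
	static inline uint16_t lpfc_sli4_phys_rpi(struct lpfc_hba *phba,
						  uint16_t logical_rpi)
	{
		return phba->sli4_hba.rpi_ids[logical_rpi];
	}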
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to remove all memory resources allocated
- * to support rpis. This routine presumes the caller has released all
- * rpis consumed by fabric or port logins and is prepared to have
- * the header pages removed.
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
**/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+
list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
&phba->sli4_hba.lpfc_rpi_hdr_list, list) {
list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
kfree(rpi_hdr->dmabuf);
kfree(rpi_hdr);
}
-
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
- memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+ exit:
+ /* There are no rpis available to the port now. */
+ phba->sli4_hba.next_rpi = 0;
}
/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
/* Final checks. The port status should be clean. */
if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&reg_data.word0) ||
- bf_get(lpfc_sliport_status_err, &reg_data)) {
+ (bf_get(lpfc_sliport_status_err, &reg_data) &&
+ !bf_get(lpfc_sliport_status_rn, &reg_data))) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
struct lpfc_mbx_read_config *rd_config;
- uint32_t rc = 0;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_mbx_get_func_cfg *get_func_cfg;
+ struct lpfc_rsrc_desc_fcfcoe *desc;
+ uint32_t desc_count;
+ int length, i, rc = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ phba->sli4_hba.extents_in_use =
+ bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_fcfi =
bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
- phba->sli4_hba.max_cfg_param.fcfi_base =
- bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_eq =
bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2003 cfg params XRI(B:%d M:%d), "
+ "2003 cfg params Extents? %d "
+ "XRI(B:%d M:%d), "
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI(B:%d M:%d)\n",
+ "FCFI(Count:%d)\n",
+ phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_vfi,
phba->sli4_hba.max_cfg_param.rpi_base,
phba->sli4_hba.max_cfg_param.max_rpi,
- phba->sli4_hba.max_cfg_param.fcfi_base,
phba->sli4_hba.max_cfg_param.max_fcfi);
}
- mempool_free(pmb, phba->mbox_mem_pool);
+
+ if (rc)
+ goto read_cfg_out;
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->cfg_hba_queue_depth =
phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
+
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ goto read_cfg_out;
+
+ /* get the pf# and vf# for SLI4 if_type 2 port */
+ length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc || shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3026 Mailbox failed , mbxCmd x%x "
+ "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ rc = -EIO;
+ goto read_cfg_out;
+ }
+
+ /* search for fc_fcoe resource descriptor */
+ get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+ desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)
+ &get_func_cfg->func_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ phba->sli4_hba.iov.pf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+ phba->sli4_hba.iov.vf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM)
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+ "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+ phba->sli4_hba.iov.vf_number);
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3028 GET_FUNCTION_CONFIG: failed to find "
+ "Resrouce Descriptor:x%x\n",
+ LPFC_RSRC_DESC_TYPE_FCFCOE);
+ rc = -EIO;
+ }
+
+read_cfg_out:
+ mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = NULL;
/* Release FCP response complete queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ fcp_qidx = 0;
+ do
lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ fcp_cqidx = 0;
+ do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
goto out_destroy_fcp_cq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.fp_eq[fcp_cqidx],
- LPFC_WCQ, LPFC_FCP);
+ if (phba->cfg_fcp_eq_count)
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.fp_eq[fcp_cqidx],
+ LPFC_WCQ, LPFC_FCP);
+ else
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.sp_eq,
+ LPFC_WCQ, LPFC_FCP);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent eq[%d]-id=%d\n",
+ "parent %seq[%d]-id=%d\n",
fcp_cqidx,
phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ (phba->cfg_fcp_eq_count) ? "" : "sp_",
fcp_cqidx,
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
- }
+ (phba->cfg_fcp_eq_count) ?
+ phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+ phba->sli4_hba.sp_eq->queue_id);
+ } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
/*
* Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
fcp_cq_index,
phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
/* Round robin FCP Work Queue's Completion Queue assignment */
- fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+ if (phba->cfg_fcp_eq_count)
+ fcp_cq_index = ((fcp_cq_index + 1) %
+ phba->cfg_fcp_eq_count);
}
/*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
if (rdy_chk < 1000)
break;
}
+ /* delay driver action following IF_TYPE_2 function reset */
+ msleep(100);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
/*
* Assign MSI-X vectors to interrupt handlers
*/
-
- /* The first vector must associated to slow-path handler for MQ */
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (vectors > 1)
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ else
+ /* All Interrupts need to be handled by one EQ */
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_intr_handler, IRQF_SHARED,
+ LPFC_DRIVER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
int wait_cnt = 0;
LPFC_MBOXQ_t *mboxq;
+ struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return rc;
}
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int length;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ /*
+ * By default, the driver assumes the SLI4 port requires RPI
+ * header postings. The SLI4_PARAM response will correct this
+ * assumption.
+ */
+ phba->sli4_hba.rpi_hdrs_in_use = 1;
+
/* Read the port's SLI4 Config Parameters */
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
mbx_sli4_parameters);
+ phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+ phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return 0;
}
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Disable interrupt */
lpfc_sli_disable_intr(phba);
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
}
/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * returns the number of bytes written if write is successful.
+ * returns a negative error value if there were errors.
+ * returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+ char fwrev[32];
+ struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+ struct list_head dma_buffer_list;
+ int i, rc = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+ uint32_t offset = 0, temp_offset = 0;
+
+ INIT_LIST_HEAD(&dma_buffer_list);
+ if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+ (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
+ (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+ (image->size != fw->size)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 Invalid FW image found. "
+ "Magic:%d Type:%x ID:%x\n",
+ image->magic_number,
+ bf_get(lpfc_grp_hdr_file_type, image),
+ bf_get(lpfc_grp_hdr_id, image));
+ return -EINVAL;
+ }
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3023 Updating Firmware. Current Version:%s "
+ "New Version:%s\n",
+ fwrev, image->rev_name);
+ for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dmabuf->list, &dma_buffer_list);
+ }
+ while (offset < fw->size) {
+ temp_offset = offset;
+ list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+ if (offset + SLI4_PAGE_SIZE > fw->size) {
+ temp_offset += fw->size - offset;
+ memcpy(dmabuf->virt,
+ fw->data + temp_offset,
+ fw->size - offset);
+ break;
+ }
+ temp_offset += SLI4_PAGE_SIZE;
+ memcpy(dmabuf->virt, fw->data + temp_offset,
+ SLI4_PAGE_SIZE);
+ }
+ rc = lpfc_wr_object(phba, &dma_buffer_list,
+ (fw->size - offset), &offset);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update failed. "
+ "%d\n", rc);
+ goto out;
+ }
+ }
+ rc = offset;
+ }
+out:
+ list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ return rc;
+}
+
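Per the return convention documented above (bytes written, negative errno, or 0 when the image already matches the running firmware), a caller can branch on all three outcomes. A hedged sketch of such a caller; the log strings here are illustrative, not driver messages:

	const struct firmware *fw;
	int ret;

	if (request_firmware(&fw, file_name, &phba->pcidev->dev) == 0) {
		ret = lpfc_write_firmware(phba, fw);
		if (ret < 0)
			dev_warn(&phba->pcidev->dev,
				 "firmware update failed: %d\n", ret);
		else if (ret == 0)
			dev_info(&phba->pcidev->dev,
				 "firmware already current\n");
		else
			dev_info(&phba->pcidev->dev,
				 "wrote %d firmware bytes\n", ret);
		release_firmware(fw);
	}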
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
+ int adjusted_fcp_eq_count;
+ int fcp_qidx;
+ const struct firmware *fw;
+ uint8_t file_name[16];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
- /* Default to single FCP EQ for non-MSI-X */
+ /* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX)
- phba->cfg_fcp_eq_count = 1;
- else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
- phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ adjusted_fcp_eq_count = 0;
+ else if (phba->sli4_hba.msix_vec_nr <
+ phba->cfg_fcp_eq_count + 1)
+ adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ else
+ adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+ /* Free unused EQs */
+ for (fcp_qidx = adjusted_fcp_eq_count;
+ fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ /* do not delete the first fcp_cq */
+ if (fcp_qidx)
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[fcp_qidx]);
+ }
+ phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
+ /* check for firmware upgrade or downgrade */
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
+
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85e..556767028353 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
- mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
/* save address for completion */
pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
- if (vpi != 0xffff)
- vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
+ if ((vpi != 0xffff) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- mb->un.varRegLogin.rpi = rpi;
- if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
- return 1;
- }
- mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
"rpi x%x\n", vpi, did, rpi);
- return (1);
+ return 1;
}
INIT_LIST_HEAD(&mp->list);
sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
- return (0);
+ return 0;
}
/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
*
* This routine prepares the mailbox command for unregistering remote port
* login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
**/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+ mb->un.varUnregLogin.rpi = rpi;
mb->un.varUnregLogin.rsvd1 = 0;
- mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
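The new note above pins down the calling convention: on SLI4 the driver-side nlp_rpi is a logical index and must be mapped through rpi_ids[] before it is placed in the UNREG_LOGIN mailbox, while SLI3 keeps using the firmware rpi directly. Roughly (ndlp and mbox are assumed from the surrounding code):

	/* SLI3: the rpi from REG_LOGIN is used as-is. */
	lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);

	/* SLI4: translate the logical index to the physical rpi first. */
	lpfc_unreg_login(phba, vport->vpi,
			 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], mbox);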
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi,
- vport->vpi + phba->vpi_base, mbox);
- mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
+ /*
+ * For SLI4 functions, the rpi field is overloaded for
+ * the vport context unreg all. This routine passes
+ * 0 for the rpi field in lpfc_unreg_login for compatibility
+ * with SLI3 and then overrides the rpi field with the
+ * expected value for SLI4.
+ */
+ lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+ mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
mb->un.varRegVpi.upd = 1;
- mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+
+ mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
mb->un.varRegVpi.sid = vport->fc_myDID;
- mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+ else
+ mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
sizeof(struct lpfc_name));
mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- if (phba->sli_rev < LPFC_SLI_REV4)
- mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
- else
- mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+ else if (phba->sli_rev >= LPFC_SLI_REV4)
+ mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
return length;
}
- /* Setup for the none-embedded mbox command */
+ /* Setup for the non-embedded mbox command */
pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
- mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+ mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
GFP_KERNEL);
if (!mbox->sge_array) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
/* The sub-header is in DMA memory, which needs endian conversion */
if (cfg_shdr)
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
- sizeof(union lpfc_sli4_cfg_shdr));
-
+ sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
}
/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: LPFC_SLI4_MBX_EMBED for an embedded command, LPFC_SLI4_MBX_NEMBED otherwise.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands. It is called after lpfc_sli4_config. The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Return: 0 on success, 1 on failure.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+ uint8_t opcode = 0;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+ void *virtaddr = NULL;
+
+ /* Set up SLI4 ioctl command header fields */
+ if (emb == LPFC_SLI4_MBX_NEMBED) {
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ virtaddr = mbox->sge_array->addr[0];
+ if (virtaddr == NULL)
+ return 1;
+ n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ }
+
+ /*
+ * The resource type is common to all extent Opcodes and resides in the
+ * same position.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ rsrc_type);
+ else {
+ /* This is DMA data. Byteswap is required. */
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ n_rsrc_extnt, rsrc_type);
+ lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+ &n_rsrc_extnt->word4,
+ sizeof(uint32_t));
+ }
+
+ /* Complete the initialization for the particular Opcode. */
+ opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ exts_count);
+ else
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ n_rsrc_extnt, exts_count);
+ break;
+ case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+ case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+ case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+ /* Initialization is complete. */
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2929 Resource Extent Opcode x%x is "
+ "unsupported\n", opcode);
+ return 1;
+ }
+
+ return 0;
+}
+
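As documented, the helper is meant to be called after lpfc_sli4_config() has laid out the command; a hedged sketch of the expected sequence for an embedded request (lpfc_demo_dealloc_extent() is a hypothetical wrapper and the length handling is abbreviated):

	static int lpfc_demo_dealloc_extent(struct lpfc_hba *phba,
					    uint16_t rsrc_type, uint32_t length)
	{
		LPFC_MBOXQ_t *mbox;
		int rc;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			return -ENOMEM;
		/* Lay out the SLI4 config header first ... */
		lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
				 length, LPFC_SLI4_MBX_EMBED);
		/* ... then fill in the extent-specific fields. */
		rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, rsrc_type,
						LPFC_SLI4_MBX_EMBED);
		if (!rc)
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		mempool_free(mbox, phba->mbox_mem_pool);
		return rc;
	}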
+/**
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
- bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
- bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
- bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+ bf_set(lpfc_init_vfi_vfi, init_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_init_vpi_vpi, init_vfi,
+ vport->phba->vpi_ids[vport->vpi]);
+ bf_set(lpfc_init_vfi_fcfi, init_vfi,
+ vport->phba->fcf.fcfi);
}
/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
- bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+ bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
- bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
- vpi + phba->vpi_base);
+ phba->vpi_ids[vpi]);
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
- phba->pport->vfi + phba->vfi_base);
+ phba->sli4_hba.vfi_ids[phba->pport->vfi]);
}
/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
- vport->vfi + vport->phba->vfi_base);
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
}
/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = ndlp->phba;
struct lpfc_mbx_resume_rpi *resume_rpi;
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
- bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
+ bf_set(lpfc_resume_rpi_index, resume_rpi,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0bb..10d5b5e41499 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- int longs;
int i;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_hrb_pool = NULL;
phba->lpfc_drb_pool = NULL;
}
- /* vpi zero is reserved for the physical port so add 1 to max */
- longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
- if (!phba->vpi_bmask)
- goto fail_free_dbq_pool;
return 0;
-
- fail_free_dbq_pool:
- pci_pool_destroy(phba->lpfc_drb_pool);
- phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- /* Free VPI bitmask memory */
- kfree(phba->vpi_bmask);
-
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d4205ea6..2ddd02f7c603 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /*
- * Need to unreg_login if we are already in one of these states and
- * change to NPR state. This will block the port until after the ACC
- * completes and the reg_login is issued and completed.
- */
+ /* no need to reg_login if we are already in one of these states */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_unreg_rpi(vport, ndlp);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+ return 1;
}
if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+
/**
* lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
* @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0246 RegLogin failed Data: x%x x%x x%x\n",
- did, mb->mbxStatus, vport->port_state);
+ "0246 RegLogin failed Data: x%x x%x x%x x%x "
+ "x%x\n",
+ did, mb->mbxStatus, vport->port_state,
+ mb->un.varRegLogin.vpi,
+ mb->un.varRegLogin.rpi);
/*
* If RegLogin failed due to lack of HBA resources do not
* retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
/* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
MAILBOX_t *mb = &pmb->u.mb;
if (!mb->mbxStatus) {
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481b2406..3ccc97496ebf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
if (bcnt == 0)
continue;
/* Now, post the SCSI buffer list sgls as a block */
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
/* Reset SCSI buffer count for next round of posting */
bcnt = 0;
while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
- uint16_t iotag, last_xritag = NO_XRI;
+ uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
int status = 0, index;
int bcnt;
int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
break;
}
- psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
if (last_xritag != NO_XRI
&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
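The added le32_to_cpu() calls complete a read-modify-write cycle on a word that lives in DMA memory in little-endian layout: convert to CPU order, flip the bit with bf_set(), convert back before the hardware can see it. In isolation the pattern is simply:

	/* Byte-order round trip for a DMA-visible SGE word (sketch). */
	sgl->word2 = le32_to_cpu(sgl->word2);	/* wire (LE) -> CPU order */
	bf_set(lpfc_sli4_sge_last, sgl, 1);	/* modify in CPU order */
	sgl->word2 = cpu_to_le32(sgl->word2);	/* back to LE for the port */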
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
}
if (bcnt) {
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
+
+ if (status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "3021 SCSI SGL post error %d\n",
+ status);
+ bcnt = 0;
+ }
/* Reset SCSI buffer count for next round of posting */
while (!list_empty(&sblist)) {
list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_len = sg_dma_len(sgel);
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((num_bde + 1) == nseg)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ piocbq->iocb.ulpContext =
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ piocb->ulpContext =
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ }
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
}
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
- "rpi x%x nlp_flag x%x\n",
+ "rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
- pnode->nlp_rpi, pnode->nlp_flag);
+ pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+ iocbq->iocb_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
+ "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
+ "iocb_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4]);
+ iocbqrsp->iocb.un.ulpWord[4],
+ iocbq->iocb_flag);
} else if (status == IOCB_BUSY)
ret = FAILED;
else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e1c039..98999bbd8cbf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_cqe *);
+
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
-
if (iocbq)
phba->iocb_cnt++;
if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+ phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
return sglq;
}
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
return sglq;
}
@@ -532,7 +528,6 @@ static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
- uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/*
* set the active bit even if there is no mem available.
*/
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-
if (NLP_CHK_FREE_REQ(ndlp))
goto out;
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
goto out;
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
- rrq->xritag = xritag;
+ rrq->xritag = phba->sli4_hba.xri_ids[xritag];
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
uint16_t xritag,
struct lpfc_node_rrq *rrq)
{
- uint16_t adj_xri;
struct lpfc_nodelist *ndlp = NULL;
if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
- uint16_t adj_xri;
-
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (!ndlp)
return 0;
- if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
return 1;
else
return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @piocb: Pointer to the iocbq.
*
* This function is called with hbalock held. This function
- * Gets a new driver sglq object from the sglq list. If the
+ * gets a new driver sglq object from the sglq list. If the
* list is not empty then it is successful, it returns pointer to the newly
* allocated sglq object else it returns NULL.
**/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
struct lpfc_sglq *sglq = NULL;
struct lpfc_sglq *start_sglq = NULL;
- uint16_t adj_xri;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
while (!found) {
if (!sglq)
return NULL;
- adj_xri = sglq->sli4_xritag -
- phba->sli4_hba.max_cfg_param.xri_base;
if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
}
sglq->ndlp = ndlp;
found = 1;
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
sglq->state = SGL_ALLOCATED;
}
return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
- sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
- vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+ vpi = pmb->u.mb.un.varRegLogin.vpi;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
list_del_init(&phba->sli4_hba.els_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+ qindx = 0;
+ do
list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+ while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
continue;
} else if (rc)
break;
+
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
- int mode = 3;
+ int mode = 3, i;
+ int longs;
switch (lpfc_sli_mode) {
case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Initialize VPIs. */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ /*
+ * The VPI bitmask and physical ID array are allocated
+ * and initialized once only - at driver load. A port
+ * reset doesn't need to reinitialize this memory.
+ */
+ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+ longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->vpi_bmask) {
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+
+ phba->vpi_ids = kzalloc(
+ (phba->max_vpi+1) * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->vpi_ids) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+ for (i = 0; i < phba->max_vpi; i++)
+ phba->vpi_ids[i] = i;
+ }
+ }
+
/* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+ fcp_eqidx = 0;
+ do
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold the available extent count.
+ * @extnt_size: buffer to hold the number of resource ids per extent.
+ *
+ * This function reads from the port the number of available resource
+ * extents and the number of resource ids each extent provides for the
+ * given resource type.
+ **/
+static int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_count, uint16_t *extnt_size)
+{
+ int rc = 0;
+ uint32_t length;
+ uint32_t mbox_tmo;
+ struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Find out how many extents are available for this resource type */
+ length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the GET doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2930 Failed to get resource extents "
+ "Status 0x%x Add'l Status 0x%x\n",
+ bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &rsrc_info->header.cfg_shdr.response));
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+ &rsrc_info->u.rsp);
+ *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+ &rsrc_info->u.rsp);
+ err_exit:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine after a port reset to determine whether
+ * extent reprovisioning is required.
+ *
+ * Returns:
+ * -Error: error indicates problem.
+ * 1: Extent count or size has changed.
+ * 0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+ uint16_t curr_ext_cnt, rsrc_ext_cnt;
+ uint16_t size_diff, rsrc_ext_size;
+ int rc = 0;
+ struct lpfc_rsrc_blks *rsrc_entry;
+ struct list_head *rsrc_blk_list = NULL;
+
+ size_diff = 0;
+ curr_ext_cnt = 0;
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ break;
+ }
+
+ list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+ curr_ext_cnt++;
+ if (rsrc_entry->rsrc_size != rsrc_ext_size)
+ size_diff++;
+ }
+
+ if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+ rc = 1;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Issue the allocate-extents mailbox command.
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: number of available extents.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extents allocation request. It also
+ * takes care of the amount of memory needed to allocate or get the
+ * allocated extents. It is the caller's responsibility to evaluate
+ * the response.
+ *
+ * Returns:
+ * -Error: Error value describes the condition found.
+ * 0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+ uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+ int rc = 0;
+ uint32_t req_len;
+ uint32_t emb_len;
+ uint32_t alloc_len, mbox_tmo;
+
+ /* Calculate the total requested length of the dma memory */
+ req_len = *extnt_cnt * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for extents-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ *emb = LPFC_SLI4_MBX_EMBED;
+ if (req_len > emb_len) {
+ req_len = *extnt_cnt * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ *emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+ req_len, *emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "9000 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+ if (unlikely(rc))
+ return -EIO;
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ rc = -EIO;
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type to allocate.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ bool emb = false;
+ uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+ uint16_t rsrc_id, rsrc_start, j, k;
+ uint16_t *ids;
+ int i, rc;
+ unsigned long longs;
+ unsigned long *bmask;
+ struct lpfc_rsrc_blks *rsrc_blks;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t length;
+ struct lpfc_id_range *id_array = NULL;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ struct list_head *ext_blk_list;
+
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_cnt,
+ &rsrc_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "3009 No available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
+ "2903 Available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port does not guarantee to respond to
+ * all extents counts request so update the local variable with the
+ * allocated count from the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ id_array = &rsrc_ext->u.rsp.id[0];
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ id_array = &n_rsrc->id;
+ }
+
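+ /*
+ * Size the bitmask to cover one bit per resource id across all
+ * extents, rounded up to whole unsigned longs.
+ */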
+ longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+ /*
+ * Based on the resource size and count, correct the base and max
+ * resource values.
+ */
+ length = sizeof(struct lpfc_rsrc_blks);
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ kfree(phba->sli4_hba.rpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /*
+ * The next_rpi was initialized with the maximum available
+ * count but the port may allocate a smaller number. Catch
+ * that case and update the next_rpi.
+ */
+ phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.rpi_bmask;
+ ids = phba->sli4_hba.rpi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->vpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->vpi_bmask;
+ ids = phba->vpi_ids;
+ ext_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ kfree(phba->sli4_hba.xri_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.xri_bmask;
+ ids = phba->sli4_hba.xri_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ kfree(phba->sli4_hba.vfi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.vfi_bmask;
+ ids = phba->sli4_hba.vfi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ /* Unsupported Opcode. Fail call. */
+ id_array = NULL;
+ bmask = NULL;
+ ids = NULL;
+ ext_blk_list = NULL;
+ goto err_exit;
+ }
+
+ /*
+ * Complete initializing the extent configuration with the
+ * allocated ids assigned to this function. The bitmask serves
+ * as an index into the array and manages the available ids. The
+ * array just stores the ids communicated to the port via the wqes.
+ */
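+ /* Each 32-bit word in the response packs two 16-bit extent base ids. */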
+ for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+ if ((i % 2) == 0)
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+ &id_array[k]);
+ else
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+ &id_array[k]);
+
+ rsrc_blks = kzalloc(length, GFP_KERNEL);
+ if (unlikely(!rsrc_blks)) {
+ rc = -ENOMEM;
+ kfree(bmask);
+ kfree(ids);
+ goto err_exit;
+ }
+ rsrc_blks->rsrc_start = rsrc_id;
+ rsrc_blks->rsrc_size = rsrc_size;
+ list_add_tail(&rsrc_blks->list, ext_blk_list);
+ rsrc_start = rsrc_id;
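+ /* The first XRI extent reserves its leading ids for ELS iocbs; SCSI xris start after them. */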
+ if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+ phba->sli4_hba.scsi_xri_start = rsrc_start +
+ lpfc_sli4_get_els_iocb_cnt(phba);
+
+ while (rsrc_id < (rsrc_start + rsrc_size)) {
+ ids[j] = rsrc_id;
+ rsrc_id++;
+ j++;
+ }
+ /* Entire word processed. Get next word.*/
+ if ((i % 2) == 1)
+ k++;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range. It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ int rc;
+ uint32_t length, mbox_tmo = 0;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+ struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /*
+ * This function sends an embedded mailbox because it only sends the
+ * resource type. All extents of this type are released by the
+ * port.
+ */
+ length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the dealloc doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2919 Failed to release resource extents "
+ "for type %d - Status 0x%x Add'l Status 0x%x. "
+ "Resource memory not released.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &dealloc_rsrc->header.cfg_shdr.response));
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ /* Release kernel memory resources for the specific type. */
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->lpfc_vpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_xri_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ /* RPI bitmask and physical id array are cleaned up earlier. */
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ default:
+ break;
+ }
+
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ int i, rc, error = 0;
+ uint16_t count, base;
+ unsigned long longs;
+
+ if (phba->sli4_hba.extents_in_use) {
+ /*
+ * The port supports resource extents. The XRI, VPI, VFI, RPI
+ * resource extent count must be read and allocated before
+ * provisioning the resource id arrays.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY) {
+ /*
+ * Extent-based resources are set - the driver could
+ * be in a port reset. Figure out if any corrective
+ * actions need to be taken.
+ */
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ if (rc != 0)
+ error++;
+
+ /*
+ * It's possible that the number of resources
+ * provided to this port instance changed between
+ * resets. Detect this condition and reallocate
+ * resources. Otherwise, there is no action.
+ */
+ if (error) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_INIT,
+ "2931 Detected extent resource "
+ "change. Reallocating all "
+ "extents.\n");
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ } else
+ return 0;
+ }
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ if (unlikely(rc))
+ goto err_exit;
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return rc;
+ } else {
+ /*
+ * The port does not support resource extents. The XRI, VPI,
+ * VFI, RPI resource ids were determined from READ_CONFIG.
+ * Just allocate the bitmasks and provision the resource id
+ * arrays. If a port reset is active, the resources don't
+ * need any action - just exit.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY)
+ return 0;
+
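+ /*
+ * For each resource type the bitmask tracks allocation by zero-based
+ * logical index while the ids[] array maps that index to the physical
+ * id reported by READ_CONFIG.
+ */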
+ /* RPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_rpi;
+ base = phba->sli4_hba.max_cfg_param.rpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ rc = -ENOMEM;
+ goto free_rpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.rpi_ids[i] = base + i;
+
+ /* VPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vpi;
+ base = phba->sli4_hba.max_cfg_param.vpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto free_rpi_ids;
+ }
+ phba->vpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ rc = -ENOMEM;
+ goto free_vpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->vpi_ids[i] = base + i;
+
+ /* XRIs. */
+ count = phba->sli4_hba.max_cfg_param.max_xri;
+ base = phba->sli4_hba.max_cfg_param.xri_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto free_vpi_ids;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ rc = -ENOMEM;
+ goto free_xri_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.xri_ids[i] = base + i;
+
+ /* VFIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vfi;
+ base = phba->sli4_hba.max_cfg_param.vfi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto free_xri_ids;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ rc = -ENOMEM;
+ goto free_vfi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.vfi_ids[i] = base + i;
+
+ /*
+ * Mark all resources ready. An HBA reset doesn't need
+ * to redo this initialization.
+ */
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return 0;
+ }
+
+ free_vfi_bmask:
+ kfree(phba->sli4_hba.vfi_bmask);
+ free_xri_ids:
+ kfree(phba->sli4_hba.xri_ids);
+ free_xri_bmask:
+ kfree(phba->sli4_hba.xri_bmask);
+ free_vpi_ids:
+ kfree(phba->vpi_ids);
+ free_vpi_bmask:
+ kfree(phba->vpi_bmask);
+ free_rpi_ids:
+ kfree(phba->sli4_hba.rpi_ids);
+ free_rpi_bmask:
+ kfree(phba->sli4_hba.rpi_bmask);
+ err_exit:
+ return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function releases all SLI4 resource identifiers allocated by the
+ * driver, using extent deallocation when the port uses extents.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ if (phba->sli4_hba.extents_in_use) {
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ } else {
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
- /*
- * TODO: Why does this routine execute these task in a different
- * order from probe?
- */
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* to read FCoE param config regions
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /*
+ * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
+ * calls depend on these resources to complete port setup.
+ */
+ rc = lpfc_sli4_alloc_resource_identifiers(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2920 Failed to alloc Resource IDs "
+ "rc = x%x\n", rc);
+ goto out_free_mbox;
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
/* Register SGL pool to the device using non-embedded mailbox command */
- rc = lpfc_sli4_post_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0582 Error %d during sgl post operation\n",
- rc);
- rc = -ENODEV;
- goto out_free_mbox;
+ if (!phba->sli4_hba.extents_in_use) {
+ rc = lpfc_sli4_post_els_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+ } else {
+ rc = lpfc_sli4_post_els_sgl_list_ext(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2560 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
}
/* Register SCSI SGL pool to the device */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0383 Error %d during scsi sgl post "
"operation\n", rc);
/* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
-
- /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+ /*
+ * When the CQE status indicates a failure and the mailbox status
+ * indicates success then copy the CQE status into the mailbox status
+ * (and prefix it with x4000).
+ */
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
- bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+ if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mb,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
rc = MBXERR_ERROR;
} else
lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
else
rc = -EIO;
if (rc != MBX_SUCCESS)
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
"(x%x) cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
}
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- iocbq->vport->vpi + phba->vpi_base);
+ phba->vpi_ids[iocbq->vport->vpi]);
bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
LPFC_WQE_LENLOC_WORD3);
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
return IOCB_ERROR;
break;
}
+
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_BUSY;
}
} else {
- sglq = __lpfc_sli_get_sglq(phba, piocb);
+ sglq = __lpfc_sli_get_sglq(phba, piocb);
if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
- sglq = NULL; /* These IO's already have an XRI and
- * a mapped sgl.
- */
+ /* These IO's already have an XRI and a mapped sgl. */
+ sglq = NULL;
} else {
- /* This is a continuation of a commandi,(CX) so this
+ /*
+ * This is a continuation of a command (CX), so this
* sglq is on the active list
*/
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
if (sglq) {
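+ /* Carry both the logical and physical xri on the iocb. */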
+ piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
-
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
return IOCB_ERROR;
}
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
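+ /* Route FCP completions on work-queue CQs to the fast-path handler. */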
+ if (cq->subtype == LPFC_FCP)
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+ cqe);
+ else
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+ cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
LPFC_MBOXQ_t *mbox;
int rc;
uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
union lpfc_sli4_cfg_shdr *shdr;
if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_alloc_xri - Allocate an available xri.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine allocates the next available logical xri from the
+ * driver's xri bitmask. The logical value indexes the xri_ids array,
+ * which holds the physical xri assigned by the port.
+ *
+ * Return codes
+ * 	the allocated logical xri - successful
+ * 	NO_XRI - no xri available.
+ */
+uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+ unsigned long xri;
+
+ /*
+ * Fetch the next logical xri. Because this index is logical,
+ * the driver starts at 0 each time.
+ */
+ spin_lock_irq(&phba->hbalock);
+ xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri, 0);
+ if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+ spin_unlock_irq(&phba->hbalock);
+ return NO_XRI;
+ } else {
+ set_bit(xri, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ phba->sli4_hba.xri_count++;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ return xri;
+}
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: the logical xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver. The caller is expected
+ * to hold the hbalock.
+ **/
+void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+ phba->sli4_hba.xri_count--;
+ phba->sli4_hba.max_cfg_param.xri_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: the logical xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_sli4_free_xri(phba, xri);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
* lpfc_sli4_next_xritag - Get an xritag for the io
* @phba: Pointer to HBA context object.
*
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
- uint16_t xritag;
+ uint16_t xri_index;
- spin_lock_irq(&phba->hbalock);
- xritag = phba->sli4_hba.next_xri;
- if ((xritag != (uint16_t) -1) && xritag <
- (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base)) {
- phba->sli4_hba.next_xri++;
- phba->sli4_hba.max_cfg_param.xri_used++;
- spin_unlock_irq(&phba->hbalock);
- return xritag;
- }
- spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ xri_index = lpfc_sli4_alloc_xri(phba);
+ if (xri_index != NO_XRI)
+ return xri_index;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2004 Failed to allocate XRI.last XRITAG is %d"
" Max XRI is %d, Used XRI is %d\n",
- phba->sli4_hba.next_xri,
+ xri_index,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.xri_used);
- return -1;
+ return NO_XRI;
}
/**
- * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
* stopped.
**/
int
-lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
- uint16_t xritag_start = 0;
+ uint16_t xritag_start = 0, lxri = 0;
int els_xri_cnt, rc = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2560 Failed to allocate mbox cmd memory\n");
+ if (!mbox)
return -ENOMEM;
- }
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
- /* Get the first SGE entry from the non-embedded DMA memory */
- viraddr = mbox->sge_array->addr[0];
-
/* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+
+ /*
+ * Assign the sglq a physical xri only if the driver has not
+ * initialized those resources. A port reset only needs
+ * the sglq's posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
cpu_to_le32(putPaddrLow(0));
sgl_pg_pairs->sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(0));
+
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = sglq_entry->sli4_xritag;
sgl_pg_pairs++;
}
+
+ /* Complete initialization and perform endian conversion. */
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
- /* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
-
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
+
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, index;
+ uint32_t mbox_tmo;
+ uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+ uint16_t xritag_start = 0, lxri = 0;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ int cnt, ttl_cnt, rc = 0;
+ int loop_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* The number of sgls to be posted */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+ reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2989 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ cnt = 0;
+ ttl_cnt = 0;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_start = rsrc_blk->rsrc_start;
+ rsrc_size = rsrc_blk->rsrc_size;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3014 Working ELS Extent start %d, cnt %d\n",
+ rsrc_start, rsrc_size);
+
+ loop_cnt = min(els_xri_cnt, rsrc_size);
+ if (ttl_cnt + loop_cnt >= els_xri_cnt) {
+ loop_cnt = els_xri_cnt - ttl_cnt;
+ ttl_cnt = els_xri_cnt;
+ }
+
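+ /* Each pass posts at most one extent's worth of ELS sgls via its own mailbox. */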
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command.
+ */
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen, LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2987 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /*
+ * The starting resource may not begin at zero. Control
+ * the loop variants via the block resource parameters,
+ * but handle the sge pointers with a zero-based index
+ * that doesn't get reset per loop pass.
+ */
+ for (index = rsrc_start;
+ index < rsrc_start + loop_cnt;
+ index++) {
+ sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
+
+ /*
+ * Assign the sglq a physical xri only if the driver
+ * has not initialized those resources. A port reset
+ * only needs the sglq's posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy,
+ &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag =
+ phba->sli4_hba.xri_ids[lxri];
+ }
+
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(0));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(0));
+
+ /* Track the starting physical XRI for the mailbox. */
+ if (index == rsrc_start)
+ xritag_start = sglq_entry->sli4_xritag;
+ sgl_pg_pairs++;
+ cnt++;
+ }
+
+ /* Complete initialization and perform endian conversion. */
+ rsrc_blk->rsrc_used += loop_cnt;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3015 Post ELS Extent SGL, start %d, "
+ "cnt %d, used %d\n",
+ xritag_start, loop_cnt, rsrc_blk->rsrc_used);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2988 POST_SGL_BLOCK mailbox "
+ "command failed status x%x "
+ "add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ goto err_exit;
+ }
+ if (ttl_cnt >= els_xri_cnt)
+ break;
+ }
+
+ err_exit:
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
return rc;
}
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
+
/* Get the first SGE entry from the non-embedded DMA memory */
viraddr = mbox->sge_array->addr[0];
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
}
/**
+ * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
+ int cnt)
+{
+ struct lpfc_scsi_buf *psb = NULL;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xri_start = 0, scsi_xri_start;
+ uint16_t rsrc_range;
+ int rc = 0, avail_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ uint32_t xri_cnt = 0;
+
+ /* Calculate the total requested length of the dma memory */
+ reqlen = cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2932 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ /*
+ * The use of extents requires the driver to post the sgl headers
+ * in multiple postings to meet the contiguous resource assignment.
+ */
+ psb = list_prepare_entry(psb, sblist, list);
+ scsi_xri_start = phba->sli4_hba.scsi_xri_start;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
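+ /* Skip extents that end below the SCSI xri range or that have no unused ids left. */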
+ rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
+ if (rsrc_range < scsi_xri_start)
+ continue;
+ else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
+ continue;
+ else
+ avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
+
+ reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command. The mbox is used to post an SGL page per loop
+ * but the DMA memory has a use-once semantic so the mailbox
+ * is used and freed per loop pass.
+ */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2933 Failed to allocate mbox cmd "
+ "memory\n");
+ return -ENOMEM;
+ }
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2934 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /* pg_pairs tracks posted SGEs per loop iteration. */
+ pg_pairs = 0;
+ list_for_each_entry_continue(psb, sblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xri for this extent. */
+ if (pg_pairs == 0)
+ xri_start = psb->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ xri_cnt++;
+
+ /*
+ * Track two exit conditions - the loop has constructed
+ * all of the caller's SGE pairs or all available
+ * resource IDs in this extent are consumed.
+ */
+ if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
+ break;
+ }
+ rsrc_blk->rsrc_used += pg_pairs;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3016 Post SCSI Extent SGL, start %d, cnt %d "
+ "blk use %d\n",
+ xri_start, pg_pairs, rsrc_blk->rsrc_used);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2935 POST_SGL_BLOCK mailbox command "
+ "failed status x%x add_status x%x "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ return -ENXIO;
+ }
+
+ /* Post only what is requested. */
+ if (xri_cnt >= cnt)
+ break;
+ }
+ return rc;
+}
+
+/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates that the xri maps to the known range of XRIs
+ * allocated and used by the driver, and returns the matching logical index.
+ **/
+static uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+ uint16_t xri)
+{
+ int i;
+
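+ /* Linear search of the physical xri ids; return the matching logical index. */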
+ for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+ if (xri == phba->sli4_hba.xri_ids[i])
+ return i;
+ }
+ return NO_XRI;
+}
+
+
+/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
* @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
"SID:x%x\n", oxid, sid);
return;
}
- if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
- && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base))
+ if (lpfc_sli4_xri_inrange(phba, rxid))
lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
/* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpBdeCount = 0;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = ndlp;
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
/* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->vpi + vport->phba->vpi_base;
+ /* iocbq is prepped for internal consumption. Logical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
&phba->sli.ring[LPFC_ELS_RING],
iocbq, fc_hdr->fh_r_ctl,
fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_page;
uint32_t rc = 0;
+ uint16_t lrpi = 0;
+
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
- /* Post all rpi memory regions to the port. */
list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ /*
+ * Assign the rpi headers a physical rpi only if the driver
+ * has not initialized those resources. A port reset only
+ * needs the headers posted.
+ */
+ if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_RPI_RSRC_RDY)
+ rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
}
}
+ exit:
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_RPI_RSRC_RDY);
return rc;
}
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
LPFC_MBOXQ_t *mboxq;
struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
uint32_t rc = 0;
- uint32_t mbox_tmo;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return rc;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
/* The port is notified of the header region via a mailbox command. */
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
/* Post all rpi memory regions to the port. */
hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
sizeof(struct lpfc_sli4_cfg_mhdr),
LPFC_SLI4_MBX_EMBED);
- bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
- hdr_tmpl, rpi_page->page_count);
+
+
+ /* Post the physical rpi to the port for this rpi header. */
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
rpi_page->start_rpi);
+ bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+ hdr_tmpl, rpi_page->page_count);
+
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
- int rpi;
- uint16_t max_rpi, rpi_base, rpi_limit;
- uint16_t rpi_remaining;
+ unsigned long rpi;
+ uint16_t max_rpi, rpi_limit;
+ uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
- rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
rpi_limit = phba->sli4_hba.next_rpi;
/*
- * The valid rpi range is not guaranteed to be zero-based. Start
- * the search at the rpi_base as reported by the port.
+ * Fetch the next logical rpi. Because this index is logical,
+ * the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
- if (rpi >= rpi_limit || rpi < rpi_base)
+ rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
/*
* Don't try to allocate more rpi header regions if the device limit
- * on available rpis max has been exhausted.
+ * has been exhausted.
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
}
/*
+	 * RPI header postings are not required for SLI4 ports that support
+	 * extents.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ spin_unlock_irq(&phba->hbalock);
+ return rpi;
+ }
+
+ /*
* If the driver is running low on rpi resources, allocate another
* page now. Note that the next_rpi value is used because
* it represents how many are actually in use whereas max_rpi notes
* how many are supported max by the device.
*/
- rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
- phba->sli4_hba.rpi_count;
+ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
spin_unlock_irq(&phba->hbalock);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
"2002 Error Could not grow rpi "
"count\n");
} else {
+ lrpi = rpi_hdr->start_rpi;
+ rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
}
}
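A minimal sketch of the logical/physical split these RPI hunks introduce, reusing the lpfc field names from this patch (the surrounding declarations and hbalock handling are assumed): logical indices are always allocated from a zero-based bitmap, and the physical id the port expects is looked up in rpi_ids[] only at the point something is handed to the hardware.

	unsigned long lrpi;

	/* Logical index: allocated from a zero-based bitmap. */
	lrpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	set_bit(lrpi, phba->sli4_hba.rpi_bmask);

	/* Physical id: translated only when building something the port
	 * parses, e.g. icmd->ulpContext in the abort-response hunk above.
	 */
	icmd->ulpContext = phba->sli4_hba.rpi_ids[lrpi];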
@@ -12751,6 +14069,8 @@ void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
kfree(phba->sli4_hba.rpi_bmask);
+ kfree(phba->sli4_hba.rpi_ids);
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
@@ -13490,6 +14810,96 @@ out:
}
/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: HBA structure that indicates the port to write the object to.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total byte value of the objects to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * The mailbox command will be constructed using the dma buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine will fill in as many
+ * BDEs as the embedded mailbox can support. The @offset variable will be
+ * used to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used to
+ * determine the end of the object and whether the eof bit should be set.
+ *
+ * Return 0 if successful; @offset will contain the new offset to use
+ * for the next write.
+ * Return negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ uint32_t size, uint32_t *offset)
+{
+ struct lpfc_mbx_wr_object *wr_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc = 0, i = 0;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t written = 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_WRITE_OBJECT,
+ sizeof(struct lpfc_mbx_wr_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+ wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+ wr_object->u.request.write_offset = *offset;
+ sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+ wr_object->u.request.object_name[0] =
+ cpu_to_le32(wr_object->u.request.object_name[0]);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+ list_for_each_entry(dmabuf, dmabuf_list, list) {
+ if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+ break;
+ wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+ wr_object->u.request.bde[i].addrHigh =
+ putPaddrHigh(dmabuf->phys);
+ if (written + SLI4_PAGE_SIZE >= size) {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ (size - written);
+ written += (size - written);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ } else {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ SLI4_PAGE_SIZE;
+ written += SLI4_PAGE_SIZE;
+ }
+ i++;
+ }
+ wr_object->u.request.bde_count = i;
+ bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3025 Write Object mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ } else
+ *offset += wr_object->u.response.actual_write_length;
+ return rc;
+}
+
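One plausible shape for a caller of lpfc_wr_object(), sketched only from the contract in the comment above; fw, dmabuf_list and the code that copies image bytes into those buffers are assumed, not taken from this patch:

	uint32_t offset = 0;
	int rc = 0;

	while (offset < fw->size && !rc) {
		/* refill dmabuf_list with the image bytes at fw->data + offset */
		rc = lpfc_wr_object(phba, &dmabuf_list, fw->size - offset,
				    &offset);
	}

Passing the bytes still outstanding as @size lets the routine set the eof bit on the final chunk, and each successful call advances offset for the next iteration.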
+/**
* lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
* @vport: pointer to vport data structure.
*
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
* never happen
*/
sglq = __lpfc_clear_active_sglq(phba,
- sglq->sli4_xritag);
+ sglq->sli4_lxritag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
/* The xri and iocb resources secured,
* attempt to issue request
*/
+ piocbq->sli4_lxritag = sglq->sli4_lxritag;
piocbq->sli4_xritag = sglq->sli4_xritag;
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c14..a0075b0af142 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
struct list_head clist;
struct list_head dlist;
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct lpfc_cq_event cq_event;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf88f2ce..4b1703554a26 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
uint16_t vfi_base;
uint16_t vfi_used;
uint16_t max_fcfi;
- uint16_t fcfi_base;
uint16_t fcfi_used;
uint16_t max_eq;
uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
uint8_t rqv;
};
+struct lpfc_iov {
+ uint32_t pf_number;
+ uint32_t vf_number;
+};
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
uint32_t intr_enable;
struct lpfc_bmbx bmbx;
struct lpfc_max_cfg_param max_cfg_param;
+ uint16_t extents_in_use; /* must allocate resource extents. */
+ uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
+ uint16_t scsi_xri_start;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
+ uint16_t *rpi_ids;
uint16_t rpi_count;
+ struct list_head lpfc_rpi_blk_list;
+ unsigned long *xri_bmask;
+ uint16_t *xri_ids;
+ uint16_t xri_count;
+ struct list_head lpfc_xri_blk_list;
+ unsigned long *vfi_bmask;
+ uint16_t *vfi_ids;
+ uint16_t vfi_count;
+ struct list_head lpfc_vfi_blk_list;
struct lpfc_sli4_flags sli4_flags;
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
struct list_head sp_els_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
+ struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
enum lpfc_sgl_state state;
struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned xri. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct sli4_sge *sgl; /* pre-assigned SGL */
void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
uint32_t start_rpi;
};
+struct lpfc_rsrc_blks {
+ struct list_head list;
+ uint16_t rsrc_start;
+ uint16_t rsrc_size;
+ uint16_t rsrc_used;
+};
+
/*
* SLI4 specific function prototypes
*/
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
+ int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67a..1feb551a57bc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
- int vpi;
+ unsigned long vpi;
spin_lock_irq(&phba->hbalock);
/* Start at bit 1 because vpi zero is reserved for the physical port */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 046dcc672ec1..7370c084b178 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.34-rc1"
-#define MEGASAS_RELDATE "Feb. 24, 2011"
-#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.05.38-rc1"
+#define MEGASAS_RELDATE "May. 11, 2011"
+#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
/*
* Device IDs
@@ -76,8 +76,8 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
-#define MFI_RESET_REQUIRED 0x00000001
-
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 89c623ebadbc..2d8cdce7b2f5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.34-rc1
+ * Version : v00.00.05.38-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
- u32 status;
+ u32 status, mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
status = readl(&regs->outbound_intr_status);
- if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
- return 0;
- }
+ if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
/*
* Clear the interrupt by writing back the same value
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
/* Dummy readl to force pci flush */
readl(&regs->outbound_doorbell_clear);
- return 1;
+ return mfiStatus;
}
+
/**
* megasas_fire_cmd_ppc - Sends command to the FW
* @frame_phys_addr : Physical address of cmd
@@ -477,17 +481,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_ppc - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_ppc(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_ppc - For controller reset check
* @regs: MFI register set
*/
@@ -495,8 +488,12 @@ static int
megasas_check_reset_ppc(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
+
static struct megasas_instance_template megasas_instance_template_ppc = {
.fire_cmd = megasas_fire_cmd_ppc,
@@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
.disable_intr = megasas_disable_intr_ppc,
.clear_intr = megasas_clear_intr_ppc,
.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
- .adp_reset = megasas_adp_reset_ppc,
+ .adp_reset = megasas_adp_reset_xscale,
.check_reset = megasas_check_reset_ppc,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -620,6 +617,9 @@ static int
megasas_check_reset_skinny(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
@@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
{
u32 max_sectors_1;
u32 max_sectors_2;
- u32 tmp_sectors;
+ u32 tmp_sectors, msix_enable;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x4000000) >> 0x1a;
+ if (msix_enable && !msix_disable &&
+ !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
+ instance->msi_flag = 1;
+
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
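The mask and shift in the MSI-X hunk above simply test bit 26 of the firmware status register, which the new comment treats as the MSI-X capability bit; an equivalent, perhaps clearer, form would be:

	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) >> 26) & 1;

Doing the check here, after megasas_transition_to_ready(), rather than at probe time means the decision is made once the firmware is actually in the ready state; the old probe-time block is removed in the next hunk.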
@@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /* Try to enable MSI-X */
- if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
- !msix_disable && !pci_enable_msix(instance->pdev,
- &instance->msixentry, 1))
- instance->msi_flag = 1;
-
/*
* Initialize MFI Firmware
*/
@@ -4116,6 +4115,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
megasas_mgmt_info.max_index++;
/*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ instance->unload = 0;
+
+ /*
* Initiate AEN (Asynchronous Event Notification)
*/
if (megasas_start_aen(instance)) {
@@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_start_aen;
}
- /*
- * Register with SCSI mid-layer
- */
- if (megasas_io_attach(instance))
- goto fail_io_attach;
-
- instance->unload = 0;
return 0;
fail_start_aen:
@@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Initialize MFI Firmware
*/
@@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
{
@@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
instance->instancet->enable_intr(instance->reg_set);
- /*
- * Initiate AEN (Asynchronous Event Notification)
- */
- if (megasas_start_aen(instance))
- printk(KERN_ERR "megasas: Start AEN failed\n");
-
/* Initialize the cmd completion timer */
if (poll_mode_io)
megasas_start_timer(instance, &instance->io_completion_timer,
@@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
return 0;
fail_irq:
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
instance->unload = 1;
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ instance->instancet->disable_intr(instance->reg_set);
+ free_irq(instance->msi_flag ? instance->msixentry.vector :
+ instance->pdev->irq, instance);
+ if (instance->msi_flag)
+ pci_disable_msix(instance->pdev);
}
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 145a8cffb1fa..f13e7abd345a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -696,22 +696,6 @@ fail_get_cmd:
}
/*
- * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
- * @instance: Adapter soft state
- *
- */
-void
-megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
-{
- struct fusion_context *fusion;
- struct megasas_cmd_fusion *cmd;
-
- fusion = instance->ctrl_context;
- cmd = fusion->cmd_list[smid - 1];
- megasas_return_cmd_fusion(instance, cmd);
-}
-
-/*
* megasas_get_ld_map_info - Returns FW's ld_map structure
* @instance: Adapter soft state
* @pend: Pend the command or not
@@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
u64 start_blk = io_info->pdBlock;
u8 *cdb = io_request->CDB.CDB32;
u32 num_blocks = io_info->numBlocks;
- u8 opcode, flagvals, groupnum, control;
+ u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
/* Check if T10 PI (DIF) is enabled for this LD */
ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[8] = (u8)(num_blocks & 0xff);
cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+ io_request->IoFlags = 10; /* Specify 10-byte cdb */
cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode =
+ cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode =
+ cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u8)(num_blocks & 0xff);
+ cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ cdb_len = 16;
}
/* Normal case, just load LBA here */
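A worked example of the conversion above, assuming a READ_10 for 8 blocks at start_blk = 0x100000000 (just past the 32-bit LBA limit): cdb[0] becomes READ_16 (0x88), cdb[10..13] carry the transfer length 00 00 00 08, and the "Normal case, just load LBA here" code referenced by the last line of the hunk is left to place the 64-bit LBA in cdb[2..9]. Under the standard SBC 16-byte layout that load would look like the sketch below (the driver's existing LBA code is assumed to be equivalent):

	cdb[2] = (u8)((start_blk >> 56) & 0xff);
	cdb[3] = (u8)((start_blk >> 48) & 0xff);
	cdb[4] = (u8)((start_blk >> 40) & 0xff);
	cdb[5] = (u8)((start_blk >> 32) & 0xff);
	cdb[6] = (u8)((start_blk >> 24) & 0xff);
	cdb[7] = (u8)((start_blk >> 16) & 0xff);
	cdb[8] = (u8)((start_blk >> 8) & 0xff);
	cdb[9] = (u8)(start_blk & 0xff);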
@@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
- u32 host_diag, abs_state;
+ u32 host_diag, abs_state, status_reg, reset_adapter;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
- mutex_lock(&instance->reset_mutex);
- set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
- instance->instancet->disable_intr(instance->reg_set);
- msleep(1000);
-
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
"returning FAILED.\n");
@@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
goto out;
}
+ mutex_lock(&instance->reset_mutex);
+ set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->instancet->disable_intr(instance->reg_set);
+ msleep(1000);
+
/* First try waiting for commands to complete */
if (megasas_wait_for_outstanding_fusion(instance)) {
printk(KERN_WARNING "megaraid_sas: resetting fusion "
@@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
}
}
- if (instance->disableOnlineCtrlReset == 1) {
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (instance->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
printk(KERN_WARNING "megaraid_sas: Reset not supported"
", killing adapter.\n");
@@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
/* Check that the diag write enable (DRWE) bit is on */
host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
msleep(100);
host_diag =
@@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
retry = 0;
while ((abs_state <= MFI_STATE_FW_INIT) &&
@@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
msleep(100);
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
printk(KERN_WARNING "megaraid_sas: firmware "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2a3c05f6db8b..dcc289c25459 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "08.100.00.01"
+#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
-#define MPT2SAS_RELEASE_VERSION 01
+#define MPT2SAS_RELEASE_VERSION 02
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index f12e02358d6d..a7dbc6825f5f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -113,6 +113,7 @@ struct sense_info {
};
+#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
/**
@@ -121,6 +122,7 @@ struct sense_info {
* @work: work object (ioc->fault_reset_work_q)
* @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
+ * @device_handle: device handle
* @VF_ID: virtual function id
* @VP_ID: virtual port id
* @ignore: flag meaning this event has been marked to ignore
@@ -134,6 +136,7 @@ struct fw_event_work {
u8 cancel_pending_work;
struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
+ u16 device_handle;
u8 VF_ID;
u8 VP_ID;
u8 ignore;
@@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
switch (prot_type) {
case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
/*
* enable ref/guard checking
@@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
cpu_to_be32(scsi_get_lba(scmd));
break;
- case SCSI_PROT_DIF_TYPE2:
-
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- break;
-
case SCSI_PROT_DIF_TYPE3:
/*
@@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
#endif
/**
- * _scsih_smart_predicted_fault - illuminate Fault LED
+ * _scsih_turn_on_fault_led - illuminate Fault LED
* @ioc: per adapter object
* @handle: device handle
+ * Context: process
*
* Return nothing.
*/
static void
-_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SepReply_t mpi_reply;
Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
+ "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
struct scsi_target *starget;
struct MPT2SAS_TARGET *sas_target_priv_data;
Mpi2EventNotificationReply_t *event_reply;
@@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
starget_printk(KERN_WARNING, starget, "predicted fault\n");
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
- memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
- mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
- mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
- mpi_request.SlotStatus =
- cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
- mpi_request.DevHandle = cpu_to_le16(handle);
- mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
- if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
- &mpi_request)) != 0) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
-
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "enclosure_processor: ioc_status (0x%04x), "
- "loginfo(0x%08x)\n", ioc->name,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
- return;
- }
- }
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
}
switch (fw_event->event) {
+ case MPT2SAS_TURN_ON_FAULT_LED:
+ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ break;
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
_scsih_sas_topology_change_event(ioc, fw_event);
break;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 58f5be4740e9..de0b1a704fb5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
break;
if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
+ int j;
+
STp->pos_unknown = 0;
STp->partition = STp->new_partition = 0;
if (STp->can_partitions)
STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(STp->ps[i]);
+ for (j = 0; j < ST_NBR_PARTITIONS; j++) {
+ STps = &(STp->ps[j]);
STps->rw = ST_IDLE;
STps->eof = ST_NOEOF;
STps->at_sm = 0;
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 0339ff03a535..252523d7847e 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
- ql4_nx.o ql4_nvram.o ql4_dbg.o
+ ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 000000000000..864d018631c0
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,69 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+
+/* Scsi_Host attributes. */
+static ssize_t
+qla4xxx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla8022(ha))
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+ else
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+}
+
+static ssize_t
+qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
+}
+
+static ssize_t
+qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
+ ha->iscsi_minor);
+}
+
+static ssize_t
+qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->bootload_major, ha->bootload_minor,
+ ha->bootload_patch, ha->bootload_build);
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
+static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
+static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+
+struct device_attribute *qla4xxx_host_attrs[] = {
+ &dev_attr_fw_version,
+ &dev_attr_serial_num,
+ &dev_attr_iscsi_version,
+ &dev_attr_optrom_version,
+ NULL,
+};
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4757878d59dd..473c5c872b39 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -115,7 +115,7 @@
#define INVALID_ENTRY 0xFFFF
#define MAX_CMDS_TO_RISC 1024
#define MAX_SRBS MAX_CMDS_TO_RISC
-#define MBOX_AEN_REG_COUNT 5
+#define MBOX_AEN_REG_COUNT 8
#define MAX_INIT_RETRIES 5
/*
@@ -368,7 +368,6 @@ struct scsi_qla_host {
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
-#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
@@ -584,6 +583,14 @@ struct scsi_qla_host {
uint32_t nx_reset_timeout;
struct completion mbx_intr_comp;
+
+ /* --- From About Firmware --- */
+ uint16_t iscsi_major;
+ uint16_t iscsi_minor;
+ uint16_t bootload_major;
+ uint16_t bootload_minor;
+ uint16_t bootload_patch;
+ uint16_t bootload_build;
};
static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 31e2bf97198c..01082aa77098 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -690,6 +690,29 @@ struct mbx_sys_info {
uint8_t reserved[12]; /* 34-3f */
};
+struct about_fw_info {
+ uint16_t fw_major; /* 00 - 01 */
+ uint16_t fw_minor; /* 02 - 03 */
+ uint16_t fw_patch; /* 04 - 05 */
+ uint16_t fw_build; /* 06 - 07 */
+ uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */
+ uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */
+ uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */
+ uint16_t fw_load_source; /* 38 - 39 */
+ /* 1 = Flash Primary,
+ 2 = Flash Secondary,
+ 3 = Host Download
+ */
+ uint8_t reserved1[6]; /* 3A - 3F */
+ uint16_t iscsi_major; /* 40 - 41 */
+ uint16_t iscsi_minor; /* 42 - 43 */
+ uint16_t bootload_major; /* 44 - 45 */
+ uint16_t bootload_minor; /* 46 - 47 */
+ uint16_t bootload_patch; /* 48 - 49 */
+ uint16_t bootload_build; /* 4A - 4B */
+ uint8_t reserved2[180]; /* 4C - FF */
+};
+
struct crash_record {
uint16_t fw_major_version; /* 00 - 01 */
uint16_t fw_minor_version; /* 02 - 03 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index cc53e3fbd78c..a53a256c1f8d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
int qla4xxx_add_sess(struct ddb_entry *);
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
+int qla4xxx_about_firmware(struct scsi_qla_host *ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host *ha);
@@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
+extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 48e2241ddaf4..42ed5db2d530 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
- if (qla4xxx_get_fw_version(ha) == QLA_ERROR)
+ if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2f40ac761cd4..0e72921c752d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
- if (sense_len == 0)
+ if (sense_len == 0) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
+ " sense len 0\n", ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, __func__));
+ ha->status_srb = NULL;
return;
-
+ }
/* Save total available sense length,
* not to exceed cmd's sense buffer size */
sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
@@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
case MBOX_ASTS_SUBNET_STATE_CHANGE:
+ case MBOX_ASTS_DUPLICATE_IP:
/* No action */
DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
mbox_status));
@@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_sts[i];
/* print debug message */
- DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
- " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
- ha->host_no, ha->aen_in, mbox_sts[0],
- mbox_sts[1], mbox_sts[2], mbox_sts[3],
- mbox_sts[4]));
+ DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
+ "mb1:0x%x mb2:0x%x mb3:0x%x "
+ "mb4:0x%x mb5:0x%x\n",
+ ha->host_no, ha->aen_in,
+ mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5]));
/* advance pointer */
ha->aen_in++;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index d78b58dc5011..fce8289e9752 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
- /* To prevent overwriting mailbox registers for a command that has
- * not yet been serviced, check to see if an active command
- * (AEN, IOCB, etc.) is interrupting, then service it.
- * -----------------------------------------------------------------
- */
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!is_qla8022(ha)) {
- intr_status = readl(&ha->reg->ctrl_status);
- if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
- /* Service existing interrupt */
- ha->isp_ops->interrupt_service_routine(ha, intr_status);
- clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
- }
- }
-
ha->mbox_status_count = outCount;
for (i = 0; i < outCount; i++)
ha->mbox_status[i] = 0;
@@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
}
/**
- * qla4xxx_get_fw_version - gets firmware version
+ * qla4xxx_about_firmware - gets FW, iSCSI draft and boot loader versions
* @ha: Pointer to host adapter structure.
*
- * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
- * hold an address for data. Make sure that we write 0 to those mailboxes,
- * if unused.
+ * Retrieves the FW version, iSCSI draft version & bootloader version of the HBA.
+ * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
+ * those mailboxes, if unused.
**/
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
+int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
+ struct about_fw_info *about_fw = NULL;
+ dma_addr_t about_fw_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
+ if (!about_fw) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
+ "for about_fw\n", __func__));
+ return status;
+ }
- /* Get firmware version. */
+ memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS) {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
- "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
- return QLA_ERROR;
+ mbox_cmd[2] = LSDW(about_fw_dma);
+ mbox_cmd[3] = MSDW(about_fw_dma);
+ mbox_cmd[4] = sizeof(struct about_fw_info);
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ goto exit_about_fw;
}
- /* Save firmware version information. */
- ha->firmware_version[0] = mbox_sts[1];
- ha->firmware_version[1] = mbox_sts[2];
- ha->patch_number = mbox_sts[3];
- ha->build_number = mbox_sts[4];
+ /* Save version information. */
+ ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
+ ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
+ ha->patch_number = le16_to_cpu(about_fw->fw_patch);
+ ha->build_number = le16_to_cpu(about_fw->fw_build);
+ ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+ ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+ ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
+ ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+ ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+ ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
+ status = QLA_SUCCESS;
- return QLA_SUCCESS;
+exit_about_fw:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
+ about_fw, about_fw_dma);
+ return status;
}
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 03e522b2fe0b..fdfe27b38698 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
/* Halt all the indiviual PEGs and other blocks of the ISP */
qla4_8xxx_rom_lock(ha);
- /* mask all niu interrupts */
+ /* disable all I2Q */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
/* disable xge rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
/* disable xg1 rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
/* halt pegs */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(5);
/* big hammer */
- msleep(1000);
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
/* don't reset CAM block on reset */
qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c22f2a764d9d..f2364ec59f03 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
+ .shost_attrs = qla4xxx_host_attrs,
};
static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
- struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
struct srb *srb;
@@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
srb->cmd = cmd;
srb->flags = 0;
CMD_SP(cmd) = (void *)srb;
- cmd->scsi_done = done;
return srb;
}
@@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
/**
* qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @host: scsi host
* @cmd: Pointer to Linux's SCSI command structure
- * @done_fn: Function that the driver calls to notify the SCSI mid-layer
- * that the command has been processed.
*
* Remarks:
* This routine is invoked by Linux to send a SCSI command to the driver.
@@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
* completion handling). Unfortunely, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
-static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ struct scsi_qla_host *ha = to_qla_host(host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
struct iscsi_cls_session *sess = ddb_entry->sess;
struct srb *srb;
@@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
- spin_unlock_irq(ha->host->host_lock);
-
- srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
+ srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
if (!srb)
- goto qc_host_busy_lock;
+ goto qc_host_busy;
rval = qla4xxx_send_command_to_isp(ha, srb);
if (rval != QLA_SUCCESS)
goto qc_host_busy_free_sp;
- spin_lock_irq(ha->host->host_lock);
return 0;
qc_host_busy_free_sp:
qla4xxx_srb_free_dma(ha, srb);
mempool_free(srb, ha->srb_mempool);
-qc_host_busy_lock:
- spin_lock_irq(ha->host->host_lock);
-
qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_fail_command:
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla4xxx_queuecommand)
-
/**
* qla4xxx_mem_free - frees memory allocated to adapter
* @ha: Pointer to host adapter structure.
@@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
+ QLA82XX_PEG_HALT_STATUS1);
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, Dumping hw/fw registers:\n "
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
+ " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
+ " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
+ " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
+ ha->host_no, __func__, halt_status,
+ qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS2),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
+ 0x3c));
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
@@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
- test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
@@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
}
/* Wakeup the dpc routine for this adapter, if needed. */
- if ((start_dpc ||
+ if (start_dpc ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
@@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
- test_bit(DPC_AEN, &ha->dpc_flags)) &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
- ha->dpc_thread) {
+ test_bit(DPC_AEN, &ha->dpc_flags)) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
" - dpc flags = 0x%lx\n",
ha->host_no, __func__, ha->dpc_flags));
@@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
- if (ha->dpc_thread &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
- set_bit(AF_DPC_SCHEDULED, &ha->flags);
+ if (ha->dpc_thread)
queue_work(ha->dpc_thread, &ha->dpc_work);
- }
}
/**
@@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
- goto do_dpc_exit;
+ return;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
ha->host_no, __func__, ha->flags));
- goto do_dpc_exit;
+ return;
}
if (is_qla8022(ha)) {
@@ -1384,8 +1388,6 @@ dpc_post_reset_ha:
}
}
-do_dpc_exit:
- clear_bit(AF_DPC_SCHEDULED, &ha->flags);
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 603155769407..610492877253 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k6"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index abea2cf05c2e..a4b9cdbaaa0b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,6 +50,8 @@
#define BUS_RESET_SETTLE_TIME (10)
#define HOST_RESET_SETTLE_TIME (10)
+static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+
/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
@@ -947,6 +949,48 @@ retry_tur:
}
/**
+ * scsi_eh_test_devices - check if devices are responding from error recovery.
+ * @cmd_list: scsi commands in error recovery.
+ * @work_q: queue for commands which still need more error recovery
+ * @done_q: queue for commands which are finished
+ * @try_stu: whether a STU command should be tried in addition to TUR.
+ *
+ * Description:
+ * Tests if devices are in a working state. Commands to devices now in
+ * a working state are sent to the done_q while commands to devices which
+ * are still failing to respond are returned to the work_q for more
+ * processing.
+ **/
+static int scsi_eh_test_devices(struct list_head *cmd_list,
+ struct list_head *work_q,
+ struct list_head *done_q, int try_stu)
+{
+ struct scsi_cmnd *scmd, *next;
+ struct scsi_device *sdev;
+ int finish_cmds;
+
+ while (!list_empty(cmd_list)) {
+ scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
+ sdev = scmd->device;
+
+ finish_cmds = !scsi_device_online(scmd->device) ||
+ (try_stu && !scsi_eh_try_stu(scmd) &&
+ !scsi_eh_tur(scmd)) ||
+ !scsi_eh_tur(scmd);
+
+ list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
+ if (scmd->device == sdev) {
+ if (finish_cmds)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, work_q);
+ }
+ }
+ return list_empty(work_q);
+}
+
+
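A schematic of the calling pattern the later hunks in this file switch to; the real abort, target, bus and host reset paths keep their per-target and per-channel loops, so this shows only the shape, not the exact code:

	LIST_HEAD(check_list);

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		rtn = scsi_try_bus_reset(scmd);	/* or abort/target/host reset */
		if (rtn == SUCCESS)
			list_move_tail(&scmd->eh_entry, &check_list);
		else if (rtn == FAST_IO_FAIL)
			scsi_eh_finish_cmd(scmd, done_q);
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);

The effect is that TUR (and, for host reset, STU) probing moves out of the escalation loops into a single per-device pass here, so each device is tested once instead of once per outstanding command.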
+/**
* scsi_eh_abort_cmds - abort pending commands.
* @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
@@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
- }
+ else
+ list_move_tail(&scmd->eh_entry, &check_list);
} else
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:"
@@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
scmd));
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
LIST_HEAD(tmp_list);
+ LIST_HEAD(check_list);
list_splice_init(work_q, &tmp_list);
@@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
if (scmd_id(scmd) != id)
continue;
- if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
- && (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
+ if (rtn == SUCCESS)
+ list_move_tail(&scmd->eh_entry, &check_list);
+ else if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
else
/* push back on work queue for further processing */
@@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *chan_scmd, *next;
+ LIST_HEAD(check_list);
unsigned int channel;
int rtn;
@@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
rtn = scsi_try_bus_reset(chan_scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (channel == scmd_channel(scmd))
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd))
+ if (channel == scmd_channel(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd,
done_q);
+ else
+ list_move_tail(&scmd->eh_entry,
+ &check_list);
+ }
}
} else {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
@@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
channel));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
if (!list_empty(work_q)) {
@@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
, current->comm));
rtn = scsi_try_host_reset(scmd);
- if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
+ if (rtn == SUCCESS) {
+ list_splice_init(work_q, &check_list);
+ } else if (rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
- !scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd, done_q);
}
} else {
@@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
current->comm));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}
/**
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index f46855cd853d..ad747dc337da 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -381,11 +381,6 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
return err;
}
-/**
- * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices)
- * @s: output goes here
- * @p: not used
- */
static int always_match(struct device *dev, void *data)
{
return 1;
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index b587289cfacb..2bea4f0b684a 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
trace_seq_putc(p, 0);
return ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bd0806e64e85..953773cb26d9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
unsigned int max_blocks = 0;
q->limits.discard_zeroes_data = sdkp->lbprz;
- q->limits.discard_alignment = sdkp->unmap_alignment;
+ q->limits.discard_alignment = sdkp->unmap_alignment *
+ logical_block_size;
q->limits.discard_granularity =
max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
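[Review note] The sd_config_discard() change above matters because unmap_alignment is reported by the device in logical blocks (Block Limits VPD), while q->limits.discard_alignment is a byte value, so the raw count has to be scaled. A tiny stand-alone check, with assumed example values:

	#include <stdio.h>

	int main(void)
	{
		/* assumed example values from a device's Block Limits VPD page */
		unsigned int unmap_alignment = 1;	/* reported in logical blocks */
		unsigned int logical_block_size = 512;	/* bytes */

		/* discard_alignment is in bytes, so the raw count must be scaled */
		printf("discard_alignment = %u bytes\n",
		       unmap_alignment * logical_block_size);
		return 0;
	}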
@@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
int dbd;
int modepage;
+ int first_len;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
int old_wce = sdkp->WCE;
int old_rcd = sdkp->RCD;
int old_dpofua = sdkp->DPOFUA;
- if (sdp->skip_ms_page_8)
- goto defaults;
-
- if (sdp->type == TYPE_RBC) {
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+ goto defaults;
+ else {
+ if (sdp->skip_ms_page_3f)
+ goto defaults;
+ modepage = 0x3F;
+ if (sdp->use_192_bytes_for_3f)
+ first_len = 192;
+ dbd = 0;
+ }
+ } else if (sdp->type == TYPE_RBC) {
modepage = 6;
dbd = 8;
} else {
@@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
/* cautiously ask */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ &data, &sshdr);
if (!scsi_status_is_good(res))
goto bad_sense;
if (!data.header_length) {
modepage = 6;
+ first_len = 0;
sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
}
@@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
*/
if (len < 3)
goto bad_sense;
- if (len > 20)
- len = 20;
-
- /* Take headers and block descriptors into account */
- len += data.header_length + data.block_descriptor_length;
- if (len > SD_BUF_SIZE)
- goto bad_sense;
+ else if (len > SD_BUF_SIZE) {
+ sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+ "data from %d to %d bytes\n", len, SD_BUF_SIZE);
+ len = SD_BUF_SIZE;
+ }
+ if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
+ len = 192;
/* Get the data */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
+ if (len > first_len)
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ &data, &sshdr);
if (scsi_status_is_good(res)) {
int offset = data.header_length + data.block_descriptor_length;
- if (offset >= SD_BUF_SIZE - 2) {
- sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
- goto defaults;
+ while (offset < len) {
+ u8 page_code = buffer[offset] & 0x3F;
+ u8 spf = buffer[offset] & 0x40;
+
+ if (page_code == 8 || page_code == 6) {
+ /* We're interested only in the first 3 bytes.
+ */
+ if (len - offset <= 2) {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ } else {
+ modepage = page_code;
+ goto Page_found;
+ }
+ } else {
+ /* Go to the next page */
+ if (spf && len - offset > 3)
+ offset += 4 + (buffer[offset+2] << 8) +
+ buffer[offset+3];
+ else if (!spf && len - offset > 1)
+ offset += 2 + buffer[offset+1];
+ else {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ }
+ }
}
- if ((buffer[offset] & 0x3f) != modepage) {
+ if (modepage == 0x3F) {
+ sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+ "present\n");
+ goto defaults;
+ } else if ((buffer[offset] & 0x3f) != modepage) {
sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
goto defaults;
}
-
+ Page_found:
if (modepage == 8) {
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
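[Review note] The new loop above walks the MODE SENSE mode page descriptors until it finds the Caching page (8) or the RBC device parameters page (6). The stride depends on the SPF bit: page_0 format descriptors are 2 + page_length bytes, sub-page (SPF) descriptors are 4 bytes plus the 16-bit length. A stripped-down user-space sketch of the same walk, with buffer contents assumed for illustration (the buffer here starts past the mode parameter header and block descriptors):

	#include <stdint.h>
	#include <stdio.h>

	/* Walk MODE SENSE page descriptors; return offset of 'wanted' or -1. */
	static int find_mode_page(const uint8_t *buf, int len, uint8_t wanted)
	{
		int offset = 0;

		while (offset < len) {
			uint8_t page_code = buf[offset] & 0x3f;
			uint8_t spf = buf[offset] & 0x40;

			if (page_code == wanted)
				return offset;

			if (spf && len - offset > 3)		/* sub-page format */
				offset += 4 + (buf[offset + 2] << 8) + buf[offset + 3];
			else if (!spf && len - offset > 1)	/* page_0 format */
				offset += 2 + buf[offset + 1];
			else
				return -1;			/* truncated data */
		}
		return -1;
	}

	int main(void)
	{
		/* assumed example: an unrelated page 0x1c of length 10, then caching page 8 */
		uint8_t buf[] = { 0x1c, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				  0x08, 18, 0x04 /* WCE set */ };

		printf("caching page at offset %d\n",
		       find_mode_page(buf, sizeof(buf), 8));
		return 0;
	}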
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 9f4b58b7daad..7e22b737dfd8 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
"0: bsfw %1,%w0\n\t"
"btr %0,%1\n\t"
"jnc 0b"
- : "=&r" (rv), "=m" (*field) :);
+ : "=&r" (rv), "+m" (*field) :);
return rv;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index fbd96b29530d..de35c3ad8a69 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -80,6 +80,15 @@ config SPI_BFIN
help
This is the SPI controller master driver for Blackfin 5xx processor.
+config SPI_BFIN_SPORT
+ tristate "SPI bus via Blackfin SPORT"
+ depends on BLACKFIN
+ help
+ Enable support for a SPI bus via the Blackfin SPORT peripheral.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_bfin_sport.
+
config SPI_AU1550
tristate "Au1550/Au12x0 SPI Controller"
depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
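[Review note] The new SPI_BFIN_SPORT driver added below binds to a platform device named "bfin-sport-spi" and takes its chip-select count and pin list from struct bfin5xx_spi_master platform data (see its probe function). A minimal board-file sketch; the MMR base, IRQ number, pin macros and chip-select count are assumptions for illustration and the platform-data field layout is assumed to match what probe() dereferences, none of this is taken from the patch itself:

	/* Hypothetical board code: addresses, IRQ and pins are assumed. */
	static struct resource bfin_sport_spi_resources[] = {
		{
			.start = 0xffc00800,		/* assumed SPORT0 MMR base */
			.end   = 0xffc00800 + 0xff,
			.flags = IORESOURCE_MEM,
		}, {
			.start = IRQ_SPORT0_ERROR,	/* assumed error IRQ */
			.end   = IRQ_SPORT0_ERROR,
			.flags = IORESOURCE_IRQ,
		},
	};

	static struct bfin5xx_spi_master bfin_sport_spi_info = {
		/* CS is a plain GPIO in this driver, so any GPIO number works */
		.num_chipselect = MAX_BLACKFIN_GPIOS,
		.enable_dma = 0,
		.pin_req = {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
			    P_SPORT0_RSCLK, P_SPORT0_TFS, P_SPORT0_RFS, 0},
	};

	static struct platform_device bfin_sport_spi_device = {
		.name          = "bfin-sport-spi",
		.id            = 1,
		.num_resources = ARRAY_SIZE(bfin_sport_spi_resources),
		.resource      = bfin_sport_spi_resources,
		.dev           = { .platform_data = &bfin_sport_spi_info },
	};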
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index fd2fc5f6505f..0f8c69b6b19e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi_bfin_sport.c
new file mode 100644
index 000000000000..e557ff617b11
--- /dev/null
+++ b/drivers/spi/spi_bfin_sport.c
@@ -0,0 +1,952 @@
+/*
+ * SPI bus via the Blackfin SPORT peripheral
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+
+#include <asm/portmux.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/blackfin.h>
+#include <asm/bfin_sport.h>
+#include <asm/cacheflush.h>
+
+#define DRV_NAME "bfin-sport-spi"
+#define DRV_DESC "SPI bus via the Blackfin SPORT"
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bfin-sport-spi");
+
+enum bfin_sport_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE,
+};
+
+struct bfin_sport_spi_master_data;
+
+struct bfin_sport_transfer_ops {
+ void (*write) (struct bfin_sport_spi_master_data *);
+ void (*read) (struct bfin_sport_spi_master_data *);
+ void (*duplex) (struct bfin_sport_spi_master_data *);
+};
+
+struct bfin_sport_spi_master_data {
+ /* Driver model hookup */
+ struct device *dev;
+
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct sport_register __iomem *regs;
+ int err_irq;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ bool run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ enum bfin_sport_spi_state state;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct bfin_sport_spi_slave_data *cur_chip;
+ union {
+ void *tx;
+ u8 *tx8;
+ u16 *tx16;
+ };
+ void *tx_end;
+ union {
+ void *rx;
+ u8 *rx8;
+ u16 *rx16;
+ };
+ void *rx_end;
+
+ int cs_change;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+struct bfin_sport_spi_slave_data {
+ u16 ctl_reg;
+ u16 baud;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u16 idle_tx_val;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+static void
+bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_or(&drv_data->regs->tcr1, TSPEN);
+ bfin_write_or(&drv_data->regs->rcr1, TSPEN);
+ SSYNC();
+}
+
+static void
+bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
+ bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
+ SSYNC();
+}
+
+/* Calculate the SPI_BAUD register value based on input HZ */
+static u16
+bfin_sport_hz_to_spi_baud(u32 speed_hz)
+{
+ u_long clk, sclk = get_sclk();
+ int div = (sclk / (2 * speed_hz)) - 1;
+
+ if (div < 0)
+ div = 0;
+
+ clk = sclk / (2 * (div + 1));
+
+ if (clk > speed_hz)
+ div++;
+
+ return div;
+}
+
+/* Chip select operation functions for cs_change flag */
+static void
+bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 0);
+}
+
+static void
+bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 1);
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+static void
+bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long timeout = jiffies + HZ;
+ while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
+ if (!time_before(jiffies, timeout))
+ break;
+ }
+}
+
+static void
+bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
+ .write = bfin_sport_spi_u8_writer,
+ .read = bfin_sport_spi_u8_reader,
+ .duplex = bfin_sport_spi_u8_duplex,
+};
+
+static void
+bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
+ .write = bfin_sport_spi_u16_writer,
+ .read = bfin_sport_spi_u16_reader,
+ .duplex = bfin_sport_spi_u16_duplex,
+};
+
+/* stop controller and re-config current chip */
+static void
+bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
+
+ bfin_sport_spi_disable(drv_data);
+ dev_dbg(drv_data->dev, "restoring spi ctl state\n");
+
+ bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
+ bfin_write(&drv_data->regs->tcr2, bits);
+ bfin_write(&drv_data->regs->tclkdiv, chip->baud);
+ bfin_write(&drv_data->regs->tfsdiv, bits);
+ SSYNC();
+
+ bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
+ bfin_write(&drv_data->regs->rcr2, bits);
+ SSYNC();
+
+ bfin_sport_spi_cs_active(chip);
+}
+
+/* test if there are more transfers to be done */
+static enum bfin_sport_spi_state
+bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ drv_data->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return RUNNING_STATE;
+ }
+
+ return DONE_STATE;
+}
+
+/*
+ * caller already set message->status;
+ * DMA and PIO IRQs are blocked, so give the finished message back
+ */
+static void
+bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+ msg = drv_data->cur_msg;
+ drv_data->state = START_STATE;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ if (!drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static irqreturn_t
+sport_err_handler(int irq, void *dev_id)
+{
+ struct bfin_sport_spi_master_data *drv_data = dev_id;
+ u16 status;
+
+ dev_dbg(drv_data->dev, "%s enter\n", __func__);
+ status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);
+
+ if (status) {
+ bfin_write(&drv_data->regs->stat, status);
+ SSYNC();
+
+ bfin_sport_spi_disable(drv_data);
+ dev_err(drv_data->dev, "status error:%s%s%s%s\n",
+ status & TOVF ? " TOVF" : "",
+ status & TUVF ? " TUVF" : "",
+ status & ROVF ? " ROVF" : "",
+ status & RUVF ? " RUVF" : "");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+bfin_sport_spi_pump_transfers(unsigned long data)
+{
+ struct bfin_sport_spi_master_data *drv_data = (void *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct bfin_sport_spi_slave_data *chip = NULL;
+ unsigned int bits_per_word;
+ u32 tranf_success = 1;
+ u32 transfer_speed;
+ u8 full_duplex = 0;
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ if (transfer->speed_hz)
+ transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
+ else
+ transfer_speed = chip->baud;
+ bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
+ SSYNC();
+
+ /*
+ * if msg is error or done, report it back using complete() callback
+ */
+
+ /* Handle for abort */
+ if (drv_data->state == ERROR_STATE) {
+ dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
+ message->status = -EIO;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ dev_dbg(drv_data->dev, "transfer: all done!\n");
+ message->status = 0;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer */
+ if (drv_data->state == RUNNING_STATE) {
+ dev_dbg(drv_data->dev, "transfer: still running ...\n");
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ if (transfer->len == 0) {
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+
+ if (transfer->tx_buf != NULL) {
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
+ transfer->tx_buf, drv_data->tx_end);
+ } else
+ drv_data->tx = NULL;
+
+ if (transfer->rx_buf != NULL) {
+ full_duplex = transfer->tx_buf != NULL;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
+ transfer->rx_buf, drv_data->rx_end);
+ } else
+ drv_data->rx = NULL;
+
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Bits per word setup */
+ bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
+ if (bits_per_word == 8)
+ drv_data->ops = &bfin_sport_transfer_ops_u8;
+ else
+ drv_data->ops = &bfin_sport_transfer_ops_u16;
+
+ drv_data->state = RUNNING_STATE;
+
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_active(chip);
+
+ dev_dbg(drv_data->dev,
+ "now pumping a transfer: width is %d, len is %d\n",
+ bits_per_word, transfer->len);
+
+ /* PIO mode write then read */
+ dev_dbg(drv_data->dev, "doing IO transfer\n");
+
+ bfin_sport_spi_enable(drv_data);
+ if (full_duplex) {
+ /* full duplex mode */
+ BUG_ON((drv_data->tx_end - drv_data->tx) !=
+ (drv_data->rx_end - drv_data->rx));
+ drv_data->ops->duplex(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->tx != NULL) {
+ /* write only half duplex */
+
+ drv_data->ops->write(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->rx != NULL) {
+ /* read only half duplex */
+
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ tranf_success = 0;
+ }
+ bfin_sport_spi_disable(drv_data);
+
+ if (!tranf_success) {
+ dev_dbg(drv_data->dev, "IO write error!\n");
+ drv_data->state = ERROR_STATE;
+ } else {
+		/* Update total bytes transferred */
+ message->actual_length += transfer->len;
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+ }
+
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+/* pop a msg from queue and kick off real transfer */
+static void
+bfin_sport_spi_pump_messages(struct work_struct *work)
+{
+ struct bfin_sport_spi_master_data *drv_data;
+ unsigned long flags;
+ struct spi_message *next_msg;
+
+ drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&drv_data->lock, flags);
+ if (list_empty(&drv_data->queue) || !drv_data->run) {
+ /* pumper kicked off but no work to do */
+ drv_data->busy = 0;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (drv_data->cur_msg) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ next_msg = list_entry(drv_data->queue.next,
+ struct spi_message, queue);
+
+ drv_data->cur_msg = next_msg;
+
+ /* Setup the SSP using the per chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+
+ list_del_init(&drv_data->cur_msg->queue);
+
+ /* Initialize message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ bfin_sport_spi_restore_state(drv_data);
+ dev_dbg(drv_data->dev, "got a message to pump, "
+ "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
+ drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
+ drv_data->cur_chip->ctl_reg);
+
+ dev_dbg(drv_data->dev,
+ "the first transfer len is %d\n",
+ drv_data->cur_transfer->len);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ drv_data->busy = 1;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * got a msg to transfer, queue it in drv_data->queue
+ * and kick off the message pumper
+ */
+static int
+bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (!drv_data->run) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+	dev_dbg(&spi->dev, "adding a msg in transfer()\n");
+ list_add_tail(&msg->queue, &drv_data->queue);
+
+ if (drv_data->run && !drv_data->busy)
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return 0;
+}
+
+/* Called every time common spi devices change state */
+static int
+bfin_sport_spi_setup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip, *first = NULL;
+ int ret;
+
+ /* Only alloc (or use chip_info) on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ struct bfin5xx_spi_chip *chip_info;
+
+ chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ /* platform chip_info isn't required */
+ chip_info = spi->controller_data;
+ if (chip_info) {
+ /*
+			 * DITFS and TDTYPE are the only things we don't set,
+			 * but they probably shouldn't be changed by people.
+ */
+ if (chip_info->ctl_reg || chip_info->enable_dma) {
+ ret = -EINVAL;
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
+ goto error;
+ }
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->idle_tx_val = chip_info->idle_tx_val;
+ spi->bits_per_word = chip_info->bits_per_word;
+ }
+ }
+
+ if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+	/* translate the common SPI framework settings into our registers;
+	 * the following configuration is the same for TX and RX.
+ */
+
+ if (spi->mode & SPI_CPHA)
+ chip->ctl_reg &= ~TCKFE;
+ else
+ chip->ctl_reg |= TCKFE;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctl_reg |= TLSBIT;
+ else
+ chip->ctl_reg &= ~TLSBIT;
+
+ /* Sport in master mode */
+ chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;
+
+ chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);
+
+ chip->cs_gpio = spi->chip_select;
+ ret = gpio_request(chip->cs_gpio, spi->modalias);
+ if (ret)
+ goto error;
+
+ dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
+ spi->modalias, spi->bits_per_word);
+ dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
+ chip->ctl_reg, spi->chip_select);
+
+ spi_set_ctldata(spi, chip);
+
+ bfin_sport_spi_cs_deactive(chip);
+
+ return ret;
+
+ error:
+ kfree(first);
+ return ret;
+}
+
+/*
+ * callback for the SPI framework to
+ * clean up driver-specific data
+ */
+static void
+bfin_sport_spi_cleanup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);
+
+ if (!chip)
+ return;
+
+ gpio_free(chip->cs_gpio);
+
+ kfree(chip);
+}
+
+static int
+bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ INIT_LIST_HEAD(&drv_data->queue);
+ spin_lock_init(&drv_data->lock);
+
+ drv_data->run = false;
+ drv_data->busy = 0;
+
+ /* init transfer tasklet */
+ tasklet_init(&drv_data->pump_transfers,
+ bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
+
+ /* init messages workqueue */
+ INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
+ drv_data->workqueue =
+ create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
+ if (drv_data->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->run || drv_data->busy) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -EBUSY;
+ }
+
+ drv_data->run = true;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ return 0;
+}
+
+static inline int
+bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the drv_data->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead
+ */
+ drv_data->run = false;
+ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&drv_data->lock, flags);
+ }
+
+ if (!list_empty(&drv_data->queue) || drv_data->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return status;
+}
+
+static inline int
+bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ destroy_workqueue(drv_data->workqueue);
+
+ return 0;
+}
+
+static int __devinit
+bfin_sport_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bfin5xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct resource *res, *ires;
+ struct bfin_sport_spi_master_data *drv_data;
+ int status;
+
+ platform_info = dev->platform_data;
+
+ /* Allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*master) + 16);
+ if (!master) {
+ dev_err(dev, "cannot alloc spi_master\n");
+ return -ENOMEM;
+ }
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->dev = dev;
+ drv_data->pin_req = platform_info->pin_req;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bus_num = pdev->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = bfin_sport_spi_cleanup;
+ master->setup = bfin_sport_spi_setup;
+ master->transfer = bfin_sport_spi_transfer;
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto out_error_get_res;
+ }
+
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
+ dev_err(dev, "cannot map registers\n");
+ status = -ENXIO;
+ goto out_error_ioremap;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ dev_err(dev, "cannot get IORESOURCE_IRQ\n");
+ status = -ENODEV;
+ goto out_error_get_ires;
+ }
+ drv_data->err_irq = ires->start;
+
+	/* Initialize and start the queue */
+ status = bfin_sport_spi_init_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem initializing queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem starting queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = request_irq(drv_data->err_irq, sport_err_handler,
+ 0, "sport_spi_err", drv_data);
+ if (status) {
+ dev_err(dev, "unable to request sport err irq\n");
+ goto out_error_irq;
+ }
+
+ status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
+ if (status) {
+ dev_err(dev, "requesting peripherals failed\n");
+ goto out_error_peripheral;
+ }
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_master(master);
+ if (status) {
+ dev_err(dev, "problem registering spi master\n");
+ goto out_error_master;
+ }
+
+ dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
+ return 0;
+
+ out_error_master:
+ peripheral_free_list(drv_data->pin_req);
+ out_error_peripheral:
+ free_irq(drv_data->err_irq, drv_data);
+ out_error_irq:
+ out_error_queue_alloc:
+ bfin_sport_spi_destroy_queue(drv_data);
+ out_error_get_ires:
+ iounmap(drv_data->regs);
+ out_error_ioremap:
+ out_error_get_res:
+ spi_master_put(master);
+
+ return status;
+}
+
+/* stop hardware and remove the driver */
+static int __devexit
+bfin_sport_spi_remove(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ if (!drv_data)
+ return 0;
+
+ /* Remove the queue */
+ status = bfin_sport_spi_destroy_queue(drv_data);
+ if (status)
+ return status;
+
+ /* Disable the SSP at the peripheral and SOC level */
+ bfin_sport_spi_disable(drv_data);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(drv_data->master);
+
+ peripheral_free_list(drv_data->pin_req);
+
+ /* Prevent double remove */
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ /* stop hardware */
+ bfin_sport_spi_disable(drv_data);
+
+ return status;
+}
+
+static int
+bfin_sport_spi_resume(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ /* Enable the SPI interface */
+ bfin_sport_spi_enable(drv_data);
+
+ /* Start the queue running */
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status)
+ dev_err(drv_data->dev, "problem resuming queue\n");
+
+ return status;
+}
+#else
+# define bfin_sport_spi_suspend NULL
+# define bfin_sport_spi_resume NULL
+#endif
+
+static struct platform_driver bfin_sport_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = bfin_sport_spi_probe,
+ .remove = __devexit_p(bfin_sport_spi_remove),
+ .suspend = bfin_sport_spi_suspend,
+ .resume = bfin_sport_spi_resume,
+};
+
+static int __init bfin_sport_spi_init(void)
+{
+ return platform_driver_register(&bfin_sport_spi_driver);
+}
+module_init(bfin_sport_spi_init);
+
+static void __exit bfin_sport_spi_exit(void)
+{
+ platform_driver_unregister(&bfin_sport_spi_driver);
+}
+module_exit(bfin_sport_spi_exit);
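[Review note] A quick sanity check of bfin_sport_hz_to_spi_baud() in the file above: the divider is bumped when needed so the resulting serial clock never exceeds the requested rate. Stand-alone arithmetic with an assumed system clock, not kernel code:

	#include <stdio.h>

	/* Same arithmetic as bfin_sport_hz_to_spi_baud(), with sclk passed in. */
	static unsigned int hz_to_tclkdiv(unsigned long sclk, unsigned int speed_hz)
	{
		int div = (sclk / (2 * speed_hz)) - 1;

		if (div < 0)
			div = 0;
		if (sclk / (2 * (div + 1)) > speed_hz)
			div++;
		return div;
	}

	int main(void)
	{
		/* assumed: 133 MHz system clock, 5 MHz requested SPI clock */
		unsigned long sclk = 133000000;
		unsigned int div = hz_to_tclkdiv(sclk, 5000000);

		/* div = 13 -> actual clock 133 MHz / (2 * 14) = 4.75 MHz <= 5 MHz */
		printf("div=%u actual=%lu Hz\n", div, sclk / (2 * (div + 1)));
		return 0;
	}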
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index a3938958147c..32a40876532f 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -283,7 +283,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
return 0;
err_gpios:
- for (; ptr > 0; ptr--)
+ while (--ptr >= 0)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
device_remove_file(&spi->dev, &dev_attr_status_show);
@@ -301,6 +301,7 @@ static int __devexit tle62x0_remove(struct spi_device *spi)
for (ptr = 0; ptr < st->nr_gpio; ptr++)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
+ device_remove_file(&spi->dev, &dev_attr_status_show);
kfree(st);
return 0;
}
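[Review note] The tle62x0 error-path change above fixes the classic unwind off-by-one: on a failure at index ptr, only entries 0 .. ptr-1 were created, so the old loop both removed an entry that never existed and skipped index 0. A small stand-alone illustration of the corrected idiom; create_one()/remove_one() are hypothetical stand-ins, not from the driver:

	#include <stdio.h>

	static int create_one(int i) { return i == 3 ? -1 : 0; }	/* fail at index 3 */
	static void remove_one(int i) { printf("remove %d\n", i); }

	static int create_all(int n)
	{
		int ptr, ret = 0;

		for (ptr = 0; ptr < n; ptr++) {
			ret = create_one(ptr);
			if (ret)
				goto err;
		}
		return 0;
	 err:
		while (--ptr >= 0)	/* removes ptr-1 down to 0, never index ptr */
			remove_one(ptr);
		return ret;
	}

	int main(void)
	{
		return create_all(5) ? 1 : 0;
	}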
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aed4e464d31c..dee2a2c909f5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
@@ -95,17 +95,17 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
if (sc->device->tagged_supported) {
switch (sc->tag) {
case HEAD_OF_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_HOQ;
+ sam_task_attr = MSG_HEAD_TAG;
break;
case ORDERED_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_ORDERED;
+ sam_task_attr = MSG_ORDERED_TAG;
break;
default:
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
break;
}
} else
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
@@ -379,7 +379,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
- DMA_NONE, TASK_ATTR_SIMPLE,
+ DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
/*
* Allocate the LUN_RESET TMR
@@ -939,18 +939,6 @@ static u16 tcm_loop_get_fabric_sense_len(void)
return 0;
}
-static u64 tcm_loop_pack_lun(unsigned int lun)
-{
- u64 result;
-
- /* LSB of lun into byte 1 big-endian */
- result = ((lun & 0xff) << 8);
- /* use flat space addressing method */
- result |= 0x40 | ((lun >> 8) & 0x3f);
-
- return cpu_to_le64(result);
-}
-
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
switch (tl_hba->tl_proto_id) {
@@ -1481,7 +1469,6 @@ static int tcm_loop_register_configfs(void)
fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
- fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
tf_cg = &fabric->tf_group;
/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f44a6e6e1d..ee6fad979b50 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -497,10 +497,6 @@ static int target_fabric_tf_ops_check(
printk(KERN_ERR "Missing tfo->is_state_remove()\n");
return -EINVAL;
}
- if (!(tfo->pack_lun)) {
- printk(KERN_ERR "Missing tfo->pack_lun()\n");
- return -EINVAL;
- }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d25e20829012..8407f9ca2b31 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -38,6 +38,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -150,13 +151,13 @@ out:
{
struct se_device *dev = se_lun->lun_se_dev;
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
}
/*
@@ -658,8 +659,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
struct se_session *se_sess = SE_SESS(se_cmd);
struct se_task *se_task;
unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
- u32 cdb_offset = 0, lun_count = 0, offset = 8;
- u64 i, lun;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
break;
@@ -675,15 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!(se_sess)) {
- lun = 0;
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
@@ -703,15 +695,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
if ((cdb_offset + 8) >= se_cmd->data_length)
continue;
- lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ offset += 8;
cdb_offset += 8;
}
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
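[Review note] The hunks above drop the open-coded 8-byte LUN packing (and the per-fabric pack_lun() hook) in favour of the generic int_to_scsilun() helper declared in scsi_device.h, hence the new include. For single-level LUNs below 256, the SAM encoding simply places the LUN in the second byte of the 8-byte field. A rough user-space illustration of that byte layout; this is an approximation for small LUNs, not the kernel helper itself:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Rough sketch: SAM address-method 0 encoding for a LUN below 256. */
	static void pack_lun(unsigned int lun, uint8_t out[8])
	{
		memset(out, 0, 8);
		out[1] = lun & 0xff;	/* byte 0 stays 0: peripheral addressing */
	}

	int main(void)
	{
		uint8_t buf[8];
		int i;

		pack_lun(5, buf);	/* LUN 5 -> 00 05 00 00 00 00 00 00 */
		for (i = 0; i < 8; i++)
			printf("%02x ", buf[i]);
		printf("\n");
		return 0;
	}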
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7ff6a35f26ac..331d423fd0e0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -41,7 +41,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -911,7 +911,7 @@ static int pscsi_do_task(struct se_task *task)
* descriptor
*/
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
- (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+ (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4a109835e420..59b8b9c5ad72 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
{
struct se_tmr_req *tmr;
- tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
@@ -398,9 +399,9 @@ int core_tmr_lun_reset(
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b9d3501bdd91..4dafeb8b5638 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -42,7 +42,7 @@
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
transport_all_task_dev_remove_state(cmd);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- transport_free_dev_tasks(cmd);
check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
@@ -1075,7 +1074,7 @@ static inline int transport_add_task_check_sam_attr(
* head of the struct se_device->execute_task_list, and task_prev
* after that for each subsequent task
*/
- if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
list_add(&task->t_execute_list,
(task_prev != NULL) ?
&task_prev->t_execute_list :
@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
break;
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
return task;
@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
{
unsigned long flags;
+ if (atomic_read(&task->task_execute_queue) == 0) {
+ dump_stack();
+ return;
+ }
+
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -1867,7 +1873,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;
- if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+ if (cmd->sam_task_attr == MSG_ACA_TAG) {
DEBUG_STA("SAM Task Attribute ACA"
" emulation is not supported\n");
return -1;
@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
+void transport_generic_free_cmd_intr(
+ struct se_cmd *cmd)
+{
+ transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
+}
+EXPORT_SYMBOL(transport_generic_free_cmd_intr);
+
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
@@ -2504,7 +2517,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
- if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
smp_mb__after_atomic_inc();
DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
@@ -2512,7 +2525,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
T_TASK(cmd)->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
list_add_tail(&cmd->se_ordered_list,
&SE_DEV(cmd)->ordered_cmd_list);
@@ -3411,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
case READ_BUFFER:
@@ -3619,7 +3632,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
default:
@@ -3777,21 +3790,21 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
- if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+ if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
list_del(&cmd->se_ordered_list);
atomic_dec(&dev->dev_ordered_sync);
@@ -3824,7 +3837,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
new_active_tasks++;
spin_lock(&dev->delayed_cmd_lock);
- if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+ if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
break;
}
spin_unlock(&dev->delayed_cmd_lock);
@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
sg_end_cur->page_link &= ~0x02;
sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_chain(sg_head, task_sg_num, sg_head_cur);
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
sg_head = sg_head_cur;
sg_link = sg_link_cur;
- task_sg_num = task->task_sg_num;
continue;
}
sg_head = sg_first = &task->task_sg[0];
sg_link = &task->task_sg[task->task_sg_num];
- task_sg_num = task->task_sg_num;
/*
* Check for single task..
*/
@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
*/
sg_end = &task->task_sg[task->task_sg_num - 1];
sg_end->page_link &= ~0x02;
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
T_TASK(cmd)->t_tasks_sg_chained = sg_first;
T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
- DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+ DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
T_TASK(cmd)->t_tasks_sg_chained_no);
for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
T_TASK(cmd)->t_tasks_sg_chained_no, i) {
- DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
- sg, sg_page(sg), sg->length, sg->offset);
+ DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
+ i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
if (sg_is_chain(sg))
DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
}
-
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
if (wait_for_tasks && cmd->transport_wait_for_tasks)
cmd->transport_wait_for_tasks(cmd, 0, 0);
+ transport_free_dev_tasks(cmd);
+
transport_generic_remove(cmd, release_to_pool,
session_reinstatement);
}
@@ -6132,6 +6151,9 @@ get_cmd:
case TRANSPORT_REMOVE:
transport_generic_remove(cmd, 1, 0);
break;
+ case TRANSPORT_FREE_CMD_INTR:
+ transport_generic_free_cmd(cmd, 0, 1, 0);
+ break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 49e51778f733..c056a1132ae1 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -35,6 +35,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
@@ -592,8 +593,25 @@ static void ft_send_cmd(struct ft_cmd *cmd)
case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
goto err; /* TBD not supported by tcm_fc yet */
}
+ /*
+ * Locate the SAM Task Attr from fc_pri_ta
+ */
+ switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_HEADQ:
+ task_attr = MSG_HEAD_TAG;
+ break;
+ case FCP_PTA_ORDERED:
+ task_attr = MSG_ORDERED_TAG;
+ break;
+ case FCP_PTA_ACA:
+ task_attr = MSG_ACA_TAG;
+ break;
+ case FCP_PTA_SIMPLE: /* Fallthrough */
+ default:
+ task_attr = MSG_SIMPLE_TAG;
+ }
+
- /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
-	task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index fcdbbffe88cc..84e868c255dd 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -519,13 +519,6 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
return tpg->index;
}
-static u64 ft_pack_lun(unsigned int index)
-{
- WARN_ON(index >= 256);
- /* Caller wants this byte-swapped */
- return cpu_to_le64((index & 0xff) << 8);
-}
-
static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
@@ -564,7 +557,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_sense_len = ft_get_fabric_sense_len,
.set_fabric_sense_len = ft_set_fabric_sense_len,
.is_state_remove = ft_is_state_remove,
- .pack_lun = ft_pack_lun,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 652bdac8ce8e..6d5d6e679fc7 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1420,7 +1420,7 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &atmel_pops;
port->fifosize = 1;
- port->line = pdev->id;
+ port->line = data->num;
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 660b80a75cac..1102ce65a3a9 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -348,11 +348,50 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
return rc;
}
+static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == 0x1E26;
+}
+
+static void ehci_enable_xhci_companion(void)
+{
+ struct pci_dev *companion = NULL;
+
+ /* The xHCI and EHCI controllers are not on the same PCI slot */
+ for_each_pci_dev(companion) {
+ if (!usb_is_intel_switchable_xhci(companion))
+ continue;
+ usb_enable_xhci_ports(companion);
+ return;
+ }
+}
+
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_ehci(pdev))
+ ehci_enable_xhci_companion();
+
// maybe restore FLADJ
if (time_before(jiffies, ehci->next_statechange))
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f16c59d5f487..fd930618c28f 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -69,6 +69,9 @@
#define NB_PIF0_PWRDOWN_0 0x01100012
#define NB_PIF0_PWRDOWN_1 0x01100013
+#define USB_INTEL_XUSB2PR 0xD0
+#define USB_INTEL_USB3_PSSEN 0xD8
+
static struct amd_chipset_info {
struct pci_dev *nb_dev;
struct pci_dev *smbus_dev;
@@ -673,6 +676,64 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
return -ETIMEDOUT;
}
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+}
+EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports. These ports can be switched between either
+ * controller. Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start. This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over. The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately. We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ u32 ports_available;
+
+ ports_available = 0xffffffff;
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+ * Register, to turn on SuperSpeed terminations for all
+ * available ports.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+ "under xHCI: 0x%x\n", ports_available);
+
+ ports_available = 0xffffffff;
+ /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+ * switch the USB 2.0 power and data lines over to the xHCI
+ * host.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+ "to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
/**
* PCI Quirks for xHCI.
*
@@ -732,6 +793,8 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
writel(XHCI_LEGACY_DISABLE_SMI,
base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
hc_init:
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 6ae9f78e9938..b1002a8ef96f 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -8,6 +8,8 @@ int usb_amd_find_chipset_info(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
#else
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cbc4d491e626..c408e9f6a707 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -118,6 +118,12 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ }
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
@@ -242,8 +248,28 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval = 0;
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
+
retval = xhci_resume(xhci, hibernated);
return retval;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 237a765f8d18..cc1485bfed38 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -167,12 +167,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
next = ring->dequeue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
@@ -248,12 +242,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
next = ring->enqueue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
/*
@@ -636,13 +624,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(hcd, urb, status);
xhci_urb_free_priv(xhci, urb_priv);
spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);
}
}
@@ -692,6 +678,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -1093,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
complete(&xhci->addr_dev);
break;
case TRB_TYPE(TRB_DISABLE_SLOT):
- if (xhci->devs[slot_id])
+ if (xhci->devs[slot_id]) {
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci,
+ xhci->devs[slot_id], true);
xhci_free_virt_device(xhci, slot_id);
+ }
break;
case TRB_TYPE(TRB_CONFIG_EP):
virt_dev = xhci->devs[slot_id];
@@ -1630,7 +1623,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
"without IOC set??\n");
*status = -ESHUTDOWN;
} else {
- xhci_dbg(xhci, "Successful control transfer!\n");
*status = 0;
}
break;
@@ -1727,7 +1719,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
switch (trb_comp_code) {
case COMP_SUCCESS:
frame->status = 0;
- xhci_dbg(xhci, "Successful isoc transfer!\n");
break;
case COMP_SHORT_TX:
frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
@@ -1837,12 +1828,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
else
*status = 0;
} else {
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
- xhci_dbg(xhci, "Successful bulk "
- "transfer!\n");
- else
- xhci_dbg(xhci, "Successful interrupt "
- "transfer!\n");
*status = 0;
}
break;
@@ -1856,11 +1841,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Others already handled above */
break;
}
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- TRB_LEN(le32_to_cpu(event->transfer_len)));
+ if (trb_comp_code == COMP_SHORT_TX)
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) {
if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
@@ -1954,7 +1940,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
- xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2081,6 +2066,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* Some host controllers give a spurious
+ * successful event after a short transfer.
+ * Ignore it.
+ */
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ ret = 0;
+ goto cleanup;
+ }
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
@@ -2091,6 +2086,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ret = skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
+ if (trb_comp_code == COMP_SHORT_TX)
+ ep_ring->last_td_was_short = true;
+ else
+ ep_ring->last_td_was_short = false;
if (ep->skip) {
xhci_dbg(xhci, "Found td. Clear skip flag.\n");
@@ -2149,9 +2148,15 @@ cleanup:
xhci_urb_free_priv(xhci, urb_priv);
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
- "status = %d\n",
- urb, urb->actual_length, status);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags &
+ URB_SHORT_NOT_OK)) ||
+ status != 0)
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ "expected = %x, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length,
+ status);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
@@ -2180,7 +2185,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int update_ptrs = 1;
int ret;
- xhci_dbg(xhci, "In %s\n", __func__);
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1;
return 0;
@@ -2193,7 +2197,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
xhci->error_bitmask |= 1 << 2;
return 0;
}
- xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2203,20 +2206,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
/* FIXME: Handle more event types. */
switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_COMPLETION):
- xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
handle_cmd_completion(xhci, &event->event_cmd);
- xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
break;
case TRB_TYPE(TRB_PORT_STATUS):
- xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
handle_port_status(xhci, event);
- xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
update_ptrs = 0;
break;
case TRB_TYPE(TRB_TRANSFER):
- xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
ret = handle_tx_event(xhci, &event->trans_event);
- xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
if (ret < 0)
xhci->error_bitmask |= 1 << 9;
else
@@ -2273,16 +2270,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
spin_unlock(&xhci->lock);
return IRQ_NONE;
}
- xhci_dbg(xhci, "op reg status = %08x\n", status);
- xhci_dbg(xhci, "Event ring dequeue ptr:\n");
- xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
- (unsigned long long)
- xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
- lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- (unsigned int) le32_to_cpu(trb->link.intr_target),
- (unsigned int) le32_to_cpu(trb->link.control));
-
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
@@ -2397,7 +2384,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
/* Make sure the endpoint has been added to xHC schedule */
- xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
switch (ep_state) {
case EP_STATE_DISABLED:
/*
@@ -2434,7 +2420,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
struct xhci_ring *ring = ep_ring;
union xhci_trb *next;
- xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
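The XHCI_SPURIOUS_SUCCESS handling added to handle_tx_event() above boils down to a small per-ring state machine: remember whether the last TD completed with COMP_SHORT_TX, and if the next transfer event cannot be matched to a pending TD, discard it once instead of declaring the host controller broken. A minimal standalone sketch of that logic (names suffixed _sketch are illustrative, not from the patch):

    #include <stdbool.h>

    enum comp_code_sketch { COMP_SUCCESS_SKETCH, COMP_SHORT_TX_SKETCH };

    struct ring_sketch {
        bool last_td_was_short;   /* set when a TD completes short */
    };

    static int handle_event_sketch(struct ring_sketch *ring,
                                   enum comp_code_sketch code,
                                   bool trb_matches_pending_td)
    {
        if (!trb_matches_pending_td) {
            if (ring->last_td_was_short) {
                /* Quirky host: stray "success" right after a short TD. */
                ring->last_td_was_short = false;
                return 0;                 /* ignore the spurious event */
            }
            return -1;                    /* genuinely lost track of the ring */
        }
        ring->last_td_was_short = (code == COMP_SHORT_TX_SKETCH);
        return 0;
    }
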
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8f2a56ece44f..d9660eb97eb9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1314,8 +1314,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1401,6 +1403,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return ret;
}
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1578,6 +1582,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
return ret;
}
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ /* Ignore the slot flag (bit 0), and the default control endpoint flag
+ * (bit 1). The default control endpoint is added during the Address
+ * Device command and is never removed until the slot is disabled.
+ */
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ /* Use hweight32 to count the number of ones in the add flags, or
+ * number of endpoints added. Don't count endpoints that are changed
+ * (both added and dropped).
+ */
+ return hweight32(valid_add_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ return hweight32(valid_drop_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes. We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ * - the first configure endpoint command drops more endpoints than it adds
+ * - a second configure endpoint command that adds more endpoints is queued
+ * - the first configure endpoint command fails, so the config is unchanged
+ * - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 added_eps;
+
+ added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add %u, limit is %u.\n",
+ xhci->num_active_eps, added_eps,
+ xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += added_eps;
+ xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+ xhci->num_active_eps);
+ return 0;
+}
+
+/*
+ * The xHC failed the configure endpoint command for some other reason, so we
+ * need to revert the resources that the failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_failed_eps;
+
+ num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_failed_eps;
+ xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+ num_failed_eps,
+ xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_dropped_eps;
+
+ num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+ num_dropped_eps,
+ xhci->num_active_eps);
+}
+
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
@@ -1598,6 +1709,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
virt_dev = xhci->devs[udev->slot_id];
if (command) {
in_ctx = command->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
+
cmd_completion = command->completion;
cmd_status = &command->status;
command->command_trb = xhci->cmd_ring->enqueue;
@@ -1613,6 +1733,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
in_ctx = virt_dev->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
cmd_completion = &virt_dev->cmd_completion;
cmd_status = &virt_dev->cmd_status;
}
@@ -1627,6 +1755,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
if (ret < 0) {
if (command)
list_del(&command->cmd_list);
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, in_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
@@ -1649,8 +1779,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
if (!ctx_change)
- return xhci_configure_endpoint_result(xhci, udev, cmd_status);
- return xhci_evaluate_context_result(xhci, udev, cmd_status);
+ ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+ else
+ ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* If the command failed, remove the reserved resources.
+ * Otherwise, clean up the estimate to include dropped eps.
+ */
+ if (ret)
+ xhci_free_host_resources(xhci, in_ctx);
+ else
+ xhci_finish_resource_reservation(xhci, in_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
@@ -1676,6 +1820,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
@@ -2266,6 +2412,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
}
/*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command. The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+ int i;
+ unsigned int num_dropped_eps = 0;
+ unsigned int drop_flags = 0;
+
+ for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+ if (virt_dev->eps[i].ring) {
+ drop_flags |= 1 << i;
+ num_dropped_eps++;
+ }
+ }
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
+ "%u now active.\n",
+ num_dropped_eps, drop_flags,
+ xhci->num_active_eps);
+}
+
+/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
@@ -2406,6 +2580,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
goto command_cleanup;
}
+ /* Free up host controller endpoint resources */
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Don't delete the default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
@@ -2479,6 +2661,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
/*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+ if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add 1, limit is %u.\n",
+ xhci->num_active_eps, xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += 1;
+ xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+ xhci->num_active_eps);
+ return 0;
+}
+
+
+/*
* Returns 0 if the xHC ran out of device slots, the Enable Slot command
* timed out, or allocating memory failed. Returns 1 on success.
*/
@@ -2513,24 +2716,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Error while assigning device slot ID\n");
return 0;
}
- /* xhci_alloc_virt_device() does not touch rings; no need to lock.
- * Use GFP_NOIO, since this function can be called from
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_reserve_host_control_ep_resources(xhci);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ goto disable_slot;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ /* Use GFP_NOIO, since this function can be called from
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
- /* Disable slot, if we can do it without mem alloc */
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
- spin_lock_irqsave(&xhci->lock, flags);
- if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
- xhci_ring_cmd_db(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
- return 0;
+ goto disable_slot;
}
udev->slot_id = xhci->slot_id;
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripherial? */
return 1;
+
+disable_slot:
+ /* Disable slot, if we can do it without mem alloc */
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
}
/*
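The endpoint accounting introduced for XHCI_EP_LIMIT_QUIRK relies on the input control context layout spelled out in the comments above: bit 0 is the slot flag and bit 1 the default control endpoint, so shifting both masks right by two leaves one bit per regular endpoint, and hweight32() (a population count) turns the masks into counts while subtracting endpoints that are both added and dropped. A minimal userspace sketch of the same arithmetic, assuming only what the hunk shows (__builtin_popcount stands in for hweight32, and the names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Same counting rule as xhci_count_num_new_endpoints(): endpoints that are
     * both added and dropped ("changed") must not be counted as new. */
    static unsigned count_new_eps(uint32_t add_flags, uint32_t drop_flags)
    {
        uint32_t add  = add_flags >> 2;   /* skip slot and ep0 flags */
        uint32_t drop = drop_flags >> 2;
        return __builtin_popcount(add) - __builtin_popcount(add & drop);
    }

    int main(void)
    {
        /* add endpoints at flag bits 2,3,4; drop bits 3,5: bit 3 is "changed" */
        printf("%u new endpoints\n", count_new_eps(0x1C, 0x28)); /* prints 2 */
        return 0;
    }
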
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e12db7cfb9bb..ac0196e7fcf1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1123,6 +1123,7 @@ struct xhci_ring {
*/
u32 cycle_state;
unsigned int stream_id;
+ bool last_td_was_short;
};
struct xhci_erst_entry {
@@ -1290,6 +1291,19 @@ struct xhci_hcd {
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
#define XHCI_AMD_PLL_FIX (1 << 3)
+#define XHCI_SPURIOUS_SUCCESS (1 << 4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle. Ideally, they would signal that they can't handle
+ * any more endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't. Instead they expect software to keep track
+ * of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK (1 << 5)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
@@ -1338,9 +1352,6 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
static inline void xhci_writel(struct xhci_hcd *xhci,
const unsigned int val, __le32 __iomem *regs)
{
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
- regs, val);
writel(val, regs);
}
@@ -1368,9 +1379,6 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
u32 val_lo = lower_32_bits(val);
u32 val_hi = upper_32_bits(val);
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
- regs, (long unsigned int) val);
writel(val_lo, ptr);
writel(val_hi, ptr + 1);
}
@@ -1439,6 +1447,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep);
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index 3f2e07011a48..cfb5aa72b196 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -100,6 +100,7 @@ struct twl6030_usb {
u8 linkstat;
u8 asleep;
bool irq_enabled;
+ unsigned long features;
};
#define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg)
@@ -204,6 +205,12 @@ static int twl6030_start_srp(struct otg_transceiver *x)
static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
{
+ char *regulator_name;
+
+ if (twl->features & TWL6025_SUBCLASS)
+ regulator_name = "ldousb";
+ else
+ regulator_name = "vusb";
/* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
@@ -214,7 +221,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
/* Program MISC2 register and set bit VUSB_IN_VBAT */
twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
- twl->usb3v3 = regulator_get(twl->dev, "vusb");
+ twl->usb3v3 = regulator_get(twl->dev, regulator_name);
if (IS_ERR(twl->usb3v3))
return -ENODEV;
@@ -409,6 +416,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
twl->dev = &pdev->dev;
twl->irq1 = platform_get_irq(pdev, 0);
twl->irq2 = platform_get_irq(pdev, 1);
+ twl->features = pdata->features;
twl->otg.dev = twl->dev;
twl->otg.label = "twl6030";
twl->otg.set_host = twl6030_set_host;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8d7f3e69ae29..7f6c67703195 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -814,7 +814,6 @@ int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
{
- dentry_unhash(d);
return v9fs_remove(i, d, 1);
}
@@ -840,9 +839,6 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct p9_fid *newdirfid;
struct p9_wstat wstat;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
P9_DPRINTK(P9_DEBUG_VFS, "\n");
retval = 0;
old_inode = old_dentry->d_inode;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 03330e2e390c..e3e9efc1fdd8 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -320,8 +320,6 @@ affs_rmdir(struct inode *dir, struct dentry *dentry)
dentry->d_inode->i_ino,
(int)dentry->d_name.len, dentry->d_name.name);
- dentry_unhash(dentry);
-
return affs_remove_header(dentry);
}
@@ -419,9 +417,6 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct buffer_head *bh = NULL;
int retval;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n",
(u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
(u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 2c4e05160042..20c106f24927 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -845,8 +845,6 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
_enter("{%x:%u},{%s}",
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
- dentry_unhash(dentry);
-
ret = -ENAMETOOLONG;
if (dentry->d_name.len >= AFSNAMEMAX)
goto error;
@@ -1148,9 +1146,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct key *key;
int ret;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
vnode = AFS_FS_I(old_dentry->d_inode);
orig_dvnode = AFS_FS_I(old_dir);
new_dvnode = AFS_FS_I(new_dir);
diff --git a/fs/attr.c b/fs/attr.c
index 91dbe2a107f2..caf2aa521e2b 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -175,6 +175,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
return -EPERM;
}
+ if ((ia_valid & ATTR_MODE)) {
+ mode_t amode = attr->ia_mode;
+ /* Flag setting protected by i_mutex */
+ if (is_sxid(amode))
+ inode->i_flags &= ~S_NOSEC;
+ }
+
now = current_fs_time(inode->i_sb);
attr->ia_ctime = now;
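The notify_change() change above clears S_NOSEC whenever a chmod turns on a set-user-ID or set-group-ID bit, so the write path has to re-evaluate whether those bits must be stripped on the next write. A rough sketch of the mode test that motivates it; the kernel's actual is_sxid() helper lives in the headers and may differ in detail:

    #include <stdbool.h>
    #include <sys/stat.h>

    /* Sketch of the "does this mode need security attention?" test that
     * motivates clearing S_NOSEC; not the kernel's literal is_sxid(). */
    static bool mode_needs_sec_check(mode_t mode)
    {
        return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
    }
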
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index c7d1d06b0483..b14cebfd9047 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -224,9 +224,6 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct bfs_sb_info *info;
int error = -ENOENT;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_bh = new_bh = NULL;
old_inode = old_dentry->d_inode;
if (S_ISDIR(old_inode->i_mode))
diff --git a/fs/bio.c b/fs/bio.c
index 840a0d755248..9bfade8a609b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -638,10 +638,11 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* @offset: vec entry offset
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
- * number of reasons, such as the bio being full or target block
- * device limitations. The target block device must allow bio's
- * smaller than PAGE_SIZE, so it is always possible to add a single
- * page to an empty bio. This should only be used by REQ_PC bios.
+ * number of reasons, such as the bio being full or target block device
+ * limitations. The target block device must allow bio's up to PAGE_SIZE,
+ * so it is always possible to add a single page to an empty bio.
+ *
+ * This should only be used by REQ_PC bios.
*/
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
@@ -659,10 +660,9 @@ EXPORT_SYMBOL(bio_add_pc_page);
* @offset: vec entry offset
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
- * number of reasons, such as the bio being full or target block
- * device limitations. The target block device must allow bio's
- * smaller than PAGE_SIZE, so it is always possible to add a single
- * page to an empty bio.
+ * number of reasons, such as the bio being full or target block device
+ * limitations. The target block device must allow bio's up to PAGE_SIZE,
+ * so it is always possible to add a single page to an empty bio.
*/
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
unsigned int offset)
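The reworded bio_add_page()/bio_add_pc_page() comments above tighten the guarantee: the target block device must accept bios up to PAGE_SIZE, so adding a single page to an empty bio can never fail. Callers can therefore treat a failure on a fresh bio as a bug. A hedged usage fragment, assuming "page" points at a page to be submitted and with allocation error handling simplified:

    struct bio *bio = bio_alloc(GFP_NOIO, 1);

    /* Per the guarantee above, this cannot fail for an empty bio
     * as long as len <= PAGE_SIZE. */
    if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
        BUG();
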
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 31610ea73aec..9b72dcf1cd25 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
- compression.o delayed-ref.o relocation.o
+ compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 44ea5b92e1ba..f66fc9959733 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -288,7 +288,7 @@ int btrfs_acl_chmod(struct inode *inode)
return 0;
acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
- if (IS_ERR(acl) || !acl)
+ if (IS_ERR_OR_NULL(acl))
return PTR_ERR(acl);
clone = posix_acl_clone(acl, GFP_KERNEL);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 57c3bb2884ce..93b1aa932014 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -22,6 +22,7 @@
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
+#include "delayed-inode.h"
/* in memory btrfs inode */
struct btrfs_inode {
@@ -152,20 +153,34 @@ struct btrfs_inode {
unsigned ordered_data_close:1;
unsigned orphan_meta_reserved:1;
unsigned dummy_inode:1;
+ unsigned in_defrag:1;
/*
* always compress this one file
*/
unsigned force_compress:4;
+ struct btrfs_delayed_node *delayed_node;
+
struct inode vfs_inode;
};
+extern unsigned char btrfs_filetype_table[];
+
static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
{
return container_of(inode, struct btrfs_inode, vfs_inode);
}
+static inline u64 btrfs_ino(struct inode *inode)
+{
+ u64 ino = BTRFS_I(inode)->location.objectid;
+
+ if (ino <= BTRFS_FIRST_FREE_OBJECTID)
+ ino = inode->i_ino;
+ return ino;
+}
+
static inline void btrfs_i_size_write(struct inode *inode, u64 size)
{
i_size_write(inode, size);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 41d1d7c70e29..bfe42b03eaf9 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
kunmap_atomic(kaddr, KM_USER0);
if (csum != *cb_sum) {
- printk(KERN_INFO "btrfs csum failed ino %lu "
+ printk(KERN_INFO "btrfs csum failed ino %llu "
"extent %llu csum %u "
- "wanted %u mirror %d\n", inode->i_ino,
+ "wanted %u mirror %d\n",
+ (unsigned long long)btrfs_ino(inode),
(unsigned long long)disk_start,
csum, *cb_sum, cb->mirror_num);
ret = -EIO;
@@ -332,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- int page_index = 0;
+ int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
struct block_device *bdev;
@@ -366,8 +367,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
/* create and submit bios for the compressed pages */
bytes_left = compressed_len;
- for (page_index = 0; page_index < cb->nr_pages; page_index++) {
- page = compressed_pages[page_index];
+ for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+ page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_size)
ret = io_tree->ops->merge_bio_hook(page, 0,
@@ -432,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
struct compressed_bio *cb)
{
unsigned long end_index;
- unsigned long page_index;
+ unsigned long pg_index;
u64 last_offset;
u64 isize = i_size_read(inode);
int ret;
@@ -456,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
while (last_offset < compressed_end) {
- page_index = last_offset >> PAGE_CACHE_SHIFT;
+ pg_index = last_offset >> PAGE_CACHE_SHIFT;
- if (page_index > end_index)
+ if (pg_index > end_index)
break;
rcu_read_lock();
- page = radix_tree_lookup(&mapping->page_tree, page_index);
+ page = radix_tree_lookup(&mapping->page_tree, pg_index);
rcu_read_unlock();
if (page) {
misses++;
@@ -476,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (!page)
break;
- if (add_to_page_cache_lru(page, mapping, page_index,
+ if (add_to_page_cache_lru(page, mapping, pg_index,
GFP_NOFS)) {
page_cache_release(page);
goto next;
@@ -560,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
unsigned long compressed_len;
unsigned long nr_pages;
- unsigned long page_index;
+ unsigned long pg_index;
struct page *page;
struct block_device *bdev;
struct bio *comp_bio;
@@ -613,10 +614,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- for (page_index = 0; page_index < nr_pages; page_index++) {
- cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
+ for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+ cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
- if (!cb->compressed_pages[page_index])
+ if (!cb->compressed_pages[pg_index])
goto fail2;
}
cb->nr_pages = nr_pages;
@@ -634,8 +635,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio->bi_end_io = end_compressed_bio_read;
atomic_inc(&cb->pending_bios);
- for (page_index = 0; page_index < nr_pages; page_index++) {
- page = cb->compressed_pages[page_index];
+ for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+ page = cb->compressed_pages[pg_index];
page->mapping = inode->i_mapping;
page->index = em_start >> PAGE_CACHE_SHIFT;
@@ -702,8 +703,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return 0;
fail2:
- for (page_index = 0; page_index < nr_pages; page_index++)
- free_page((unsigned long)cb->compressed_pages[page_index]);
+ for (pg_index = 0; pg_index < nr_pages; pg_index++)
+ free_page((unsigned long)cb->compressed_pages[pg_index]);
kfree(cb->compressed_pages);
fail1:
@@ -945,7 +946,7 @@ void btrfs_exit_compress(void)
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
- unsigned long *page_index,
+ unsigned long *pg_index,
unsigned long *pg_offset)
{
unsigned long buf_offset;
@@ -954,7 +955,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
char *kaddr;
- struct page *page_out = bvec[*page_index].bv_page;
+ struct page *page_out = bvec[*pg_index].bv_page;
/*
* start byte is the first byte of the page we're currently
@@ -995,11 +996,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
/* check if we need to pick another page */
if (*pg_offset == PAGE_CACHE_SIZE) {
- (*page_index)++;
- if (*page_index >= vcnt)
+ (*pg_index)++;
+ if (*pg_index >= vcnt)
return 0;
- page_out = bvec[*page_index].bv_page;
+ page_out = bvec[*pg_index].bv_page;
*pg_offset = 0;
start_byte = page_offset(page_out) - disk_start;
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 51000174b9d7..a12059f4f0fd 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
- unsigned long *page_index,
+ unsigned long *pg_index,
unsigned long *pg_offset);
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 84d7ca1fe0ba..b0e18d986e0a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
-static int setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- u32 total_data, u32 total_size, int nr);
-
struct btrfs_path *btrfs_alloc_path(void)
{
@@ -107,7 +102,7 @@ void btrfs_free_path(struct btrfs_path *p)
{
if (!p)
return;
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
kmem_cache_free(btrfs_path_cachep, p);
}
@@ -117,7 +112,7 @@ void btrfs_free_path(struct btrfs_path *p)
*
* It is safe to call this on paths that no locks or extent buffers held.
*/
-noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
+noinline void btrfs_release_path(struct btrfs_path *p)
{
int i;
@@ -1328,7 +1323,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
ret = -EAGAIN;
/* release the whole path */
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* read the blocks */
if (block1)
@@ -1475,7 +1470,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
return 0;
}
free_extent_buffer(tmp);
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
return -EIO;
}
}
@@ -1494,7 +1489,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
if (p->reada)
reada_for_search(root, p, level, slot, key->objectid);
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
ret = -EAGAIN;
tmp = read_tree_block(root, blocknr, blocksize, 0);
@@ -1563,7 +1558,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
b = p->nodes[level];
if (!b) {
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
goto again;
}
BUG_ON(btrfs_header_nritems(b) == 1);
@@ -1753,7 +1748,7 @@ done:
if (!p->leave_spinning)
btrfs_set_path_blocking(p);
if (ret < 0)
- btrfs_release_path(root, p);
+ btrfs_release_path(p);
return ret;
}
@@ -3026,7 +3021,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item);
extent_len = btrfs_file_extent_num_bytes(leaf, fi);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
path->keep_locks = 1;
path->search_for_split = 1;
@@ -3216,7 +3211,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u32 new_size, int from_end)
{
- int ret = 0;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
@@ -3314,12 +3308,11 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
btrfs_set_item_size(leaf, item, new_size);
btrfs_mark_buffer_dirty(leaf);
- ret = 0;
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
BUG();
}
- return ret;
+ return 0;
}
/*
@@ -3329,7 +3322,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
u32 data_size)
{
- int ret = 0;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
@@ -3394,12 +3386,11 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
btrfs_set_item_size(leaf, item, old_size + data_size);
btrfs_mark_buffer_dirty(leaf);
- ret = 0;
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
BUG();
}
- return ret;
+ return 0;
}
/*
@@ -3559,11 +3550,10 @@ out:
* to save stack depth by doing the bulk of the work in a function
* that doesn't call btrfs_search_slot
*/
-static noinline_for_stack int
-setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- u32 total_data, u32 total_size, int nr)
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr)
{
struct btrfs_item *item;
int i;
@@ -3647,7 +3637,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
ret = 0;
if (slot == 0) {
- struct btrfs_disk_key disk_key;
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
ret = fixup_low_keys(trans, root, path, &disk_key, 1);
}
@@ -3949,7 +3938,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
else
return 1;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
@@ -4073,7 +4062,7 @@ find_next_key:
sret = btrfs_find_next_key(root, path, min_key, level,
cache_only, min_trans);
if (sret == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
} else {
goto out;
@@ -4152,7 +4141,7 @@ next:
btrfs_node_key_to_cpu(c, &cur_key, slot);
orig_lowest = path->lowest_level;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, &cur_key, path,
0, 0);
@@ -4229,7 +4218,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
again:
level = 1;
next = NULL;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
path->keep_locks = 1;
@@ -4285,7 +4274,7 @@ again:
goto again;
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto done;
}
@@ -4324,7 +4313,7 @@ again:
goto again;
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto done;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8f4b81de3ae2..6c093fa98f61 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/fs.h>
+#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
@@ -33,6 +34,7 @@
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
+#include "ioctl.h"
struct btrfs_trans_handle;
struct btrfs_transaction;
@@ -105,6 +107,12 @@ struct btrfs_ordered_sum;
/* For storing free space cache */
#define BTRFS_FREE_SPACE_OBJECTID -11ULL
+/*
+ * The inode number assigned to the special inode for storing
+ * free ino cache
+ */
+#define BTRFS_FREE_INO_OBJECTID -12ULL
+
/* dummy objectid represents multiple objectids */
#define BTRFS_MULTIPLE_OBJECTIDS -255ULL
@@ -187,7 +195,6 @@ struct btrfs_mapping_tree {
struct extent_map_tree map_tree;
};
-#define BTRFS_UUID_SIZE 16
struct btrfs_dev_item {
/* the internal btrfs device id */
__le64 devid;
@@ -294,7 +301,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
sizeof(struct btrfs_stripe) * (num_stripes - 1);
}
-#define BTRFS_FSID_SIZE 16
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
@@ -510,6 +516,12 @@ struct btrfs_extent_item_v0 {
/* use full backrefs for extent pointers in the block */
#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8)
+/*
+ * this flag is only used internally by scrub and may be changed at any time
+ * it is only declared here to avoid collisions
+ */
+#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48)
+
struct btrfs_tree_block_info {
struct btrfs_disk_key key;
u8 level;
@@ -740,12 +752,12 @@ struct btrfs_space_info {
*/
unsigned long reservation_progress;
- int full:1; /* indicates that we cannot allocate any more
+ unsigned int full:1; /* indicates that we cannot allocate any more
chunks for this space */
- int chunk_alloc:1; /* set if we are allocating a chunk */
+ unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
- int force_alloc; /* set if we need to force a chunk alloc for
- this space */
+ unsigned int force_alloc; /* set if we need to force a chunk
+ alloc for this space */
struct list_head list;
@@ -830,9 +842,6 @@ struct btrfs_block_group_cache {
u64 bytes_super;
u64 flags;
u64 sectorsize;
- int extents_thresh;
- int free_extents;
- int total_bitmaps;
unsigned int ro:1;
unsigned int dirty:1;
unsigned int iref:1;
@@ -847,9 +856,7 @@ struct btrfs_block_group_cache {
struct btrfs_space_info *space_info;
/* free space cache stuff */
- spinlock_t tree_lock;
- struct rb_root free_space_offset;
- u64 free_space;
+ struct btrfs_free_space_ctl *free_space_ctl;
/* block group cache stuff */
struct rb_node cache_node;
@@ -869,6 +876,7 @@ struct btrfs_block_group_cache {
struct reloc_control;
struct btrfs_device;
struct btrfs_fs_devices;
+struct btrfs_delayed_root;
struct btrfs_fs_info {
u8 fsid[BTRFS_FSID_SIZE];
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
@@ -895,7 +903,10 @@ struct btrfs_fs_info {
/* logical->physical extent mapping */
struct btrfs_mapping_tree mapping_tree;
- /* block reservation for extent, checksum and root tree */
+ /*
+ * block reservation for extent, checksum, root tree and
+ * delayed dir index item
+ */
struct btrfs_block_rsv global_block_rsv;
/* block reservation for delay allocation */
struct btrfs_block_rsv delalloc_block_rsv;
@@ -1022,6 +1033,7 @@ struct btrfs_fs_info {
* for the sys_munmap function call path
*/
struct btrfs_workers fixup_workers;
+ struct btrfs_workers delayed_workers;
struct task_struct *transaction_kthread;
struct task_struct *cleaner_kthread;
int thread_pool_size;
@@ -1062,6 +1074,11 @@ struct btrfs_fs_info {
/* all metadata allocations go through this cluster */
struct btrfs_free_cluster meta_alloc_cluster;
+ /* auto defrag inodes go here */
+ spinlock_t defrag_inodes_lock;
+ struct rb_root defrag_inodes;
+ atomic_t defrag_running;
+
spinlock_t ref_cache_lock;
u64 total_ref_cache_size;
@@ -1077,8 +1094,21 @@ struct btrfs_fs_info {
void *bdev_holder;
+ /* private scrub information */
+ struct mutex scrub_lock;
+ atomic_t scrubs_running;
+ atomic_t scrub_pause_req;
+ atomic_t scrubs_paused;
+ atomic_t scrub_cancel_req;
+ wait_queue_head_t scrub_pause_wait;
+ struct rw_semaphore scrub_super_lock;
+ int scrub_workers_refcnt;
+ struct btrfs_workers scrub_workers;
+
/* filesystem state */
u64 fs_state;
+
+ struct btrfs_delayed_root *delayed_root;
};
/*
@@ -1088,9 +1118,6 @@ struct btrfs_fs_info {
struct btrfs_root {
struct extent_buffer *node;
- /* the node lock is held while changing the node pointer */
- spinlock_t node_lock;
-
struct extent_buffer *commit_root;
struct btrfs_root *log_root;
struct btrfs_root *reloc_root;
@@ -1107,6 +1134,16 @@ struct btrfs_root {
spinlock_t accounting_lock;
struct btrfs_block_rsv *block_rsv;
+ /* free ino cache stuff */
+ struct mutex fs_commit_mutex;
+ struct btrfs_free_space_ctl *free_ino_ctl;
+ enum btrfs_caching_type cached;
+ spinlock_t cache_lock;
+ wait_queue_head_t cache_wait;
+ struct btrfs_free_space_ctl *free_ino_pinned;
+ u64 cache_progress;
+ struct inode *cache_inode;
+
struct mutex log_mutex;
wait_queue_head_t log_writer_wait;
wait_queue_head_t log_commit_wait[2];
@@ -1162,12 +1199,49 @@ struct btrfs_root {
struct rb_root inode_tree;
/*
+ * radix tree that keeps track of delayed nodes of every inode,
+ * protected by inode_lock
+ */
+ struct radix_tree_root delayed_nodes_tree;
+ /*
* right now this just gets used so that a root has its own devid
* for stat. It may be used for more later
*/
struct super_block anon_super;
};
+struct btrfs_ioctl_defrag_range_args {
+ /* start of the defrag operation */
+ __u64 start;
+
+ /* number of bytes to defrag, use (u64)-1 to say all */
+ __u64 len;
+
+ /*
+ * flags for the operation, which can include turning
+ * on compression for this one defrag
+ */
+ __u64 flags;
+
+ /*
+ * any extent bigger than this will be considered
+ * already defragged. Use 0 to take the kernel default
+ * Use 1 to say every single extent must be rewritten
+ */
+ __u32 extent_thresh;
+
+ /*
+ * which compression method to use if turning on compression
+ * for this defrag operation. If unspecified, zlib will
+ * be used
+ */
+ __u32 compress_type;
+
+ /* spare for later */
+ __u32 unused[4];
+};
+
+
/*
* inode items have the data typically returned from stat and store other
* info about object characteristics. There is one for every file and dir in
@@ -1265,6 +1339,7 @@ struct btrfs_root {
#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
+#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -1440,26 +1515,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
}
-static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
- struct btrfs_chunk *c, int nr,
- u64 val)
-{
- btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
-}
-
static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
struct btrfs_chunk *c, int nr)
{
return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
}
-static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
- struct btrfs_chunk *c, int nr,
- u64 val)
-{
- btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
-}
-
/* struct btrfs_block_group_item */
BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
used, 64);
@@ -1517,14 +1578,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
return (struct btrfs_timespec *)ptr;
}
-static inline struct btrfs_timespec *
-btrfs_inode_otime(struct btrfs_inode_item *inode_item)
-{
- unsigned long ptr = (unsigned long)inode_item;
- ptr += offsetof(struct btrfs_inode_item, otime);
- return (struct btrfs_timespec *)ptr;
-}
-
BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
@@ -1875,33 +1928,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
return (u8 *)ptr;
}
-static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
-{
- unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
- return (u8 *)ptr;
-}
-
-static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
-{
- unsigned long ptr = offsetof(struct btrfs_header, csum);
- return (u8 *)ptr;
-}
-
-static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
-{
- return NULL;
-}
-
-static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
-{
- return NULL;
-}
-
-static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
-{
- return NULL;
-}
-
static inline int btrfs_is_leaf(struct extent_buffer *eb)
{
return btrfs_header_level(eb) == 0;
@@ -2055,22 +2081,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
return sb->s_fs_info;
}
-static inline int btrfs_set_root_name(struct btrfs_root *root,
- const char *name, int len)
-{
- /* if we already have a name just free it */
- kfree(root->name);
-
- root->name = kmalloc(len+1, GFP_KERNEL);
- if (!root->name)
- return -ENOMEM;
-
- memcpy(root->name, name, len);
- root->name[len] = '\0';
-
- return 0;
-}
-
static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
{
if (level == 0)
@@ -2099,6 +2109,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
}
/* extent-tree.c */
+static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
+ int num_items)
+{
+ return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
+ 3 * num_items;
+}
+
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count);
@@ -2108,12 +2125,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
u64 num_bytes, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num, int reserved);
-int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 offset, u64 bytenr);
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
struct btrfs_fs_info *info,
u64 bytenr);
@@ -2290,10 +2304,12 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
int start_slot, int cache_only, u64 *last_ret,
struct btrfs_key *progress);
-void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
+void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_clear_path_blocking(struct btrfs_path *p,
+ struct extent_buffer *held);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2305,13 +2321,12 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, void *data, u32 data_size);
-int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- int nr);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -2357,8 +2372,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
*item);
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
-int btrfs_search_root(struct btrfs_root *root, u64 search_start,
- u64 *found_objectid);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item,
@@ -2368,7 +2381,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
/* dir-item.c */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
- int name_len, u64 dir,
+ int name_len, struct inode *dir,
struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -2413,12 +2426,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 offset);
int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
-/* inode-map.c */
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
- struct btrfs_root *fs_root,
- u64 dirid, u64 *objectid);
-int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
-
/* inode-item.c */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -2463,8 +2470,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig);
-int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
- u64 start, unsigned long len);
struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -2472,8 +2477,8 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
u64 isize);
-int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
- u64 end, struct list_head *list);
+int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ struct list_head *list, int search_commit);
/* inode.c */
/* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
@@ -2502,8 +2507,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u32 min_type);
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
-int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
- int sync);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
int btrfs_writepages(struct address_space *mapping,
@@ -2520,9 +2523,8 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
-void btrfs_put_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode);
+void btrfs_dirty_inode(struct inode *inode, int flags);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
@@ -2531,10 +2533,8 @@ void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
-int btrfs_commit_write(struct file *file, struct page *page,
- unsigned from, unsigned to);
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
- size_t page_offset, u64 start, u64 end,
+ size_t pg_offset, u64 start, u64 end,
int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -2566,12 +2566,16 @@ extern const struct dentry_operations btrfs_dentry_operations;
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
void btrfs_update_iflags(struct inode *inode);
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
-
+int btrfs_defrag_file(struct inode *inode, struct file *file,
+ struct btrfs_ioctl_defrag_range_args *range,
+ u64 newer_than, unsigned long max_pages);
/* file.c */
+int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
+ struct inode *inode);
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, int datasync);
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
int skip_pinned);
-int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
extern const struct file_operations btrfs_file_operations;
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
u64 start, u64 end, u64 *hint_byte, int drop_cache);
@@ -2591,10 +2595,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
-int btrfs_sysfs_add_root(struct btrfs_root *root);
-void btrfs_sysfs_del_root(struct btrfs_root *root);
-void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
@@ -2637,4 +2637,18 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
u64 *bytes_to_reserve);
void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
+
+/* scrub.c */
+int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
+ struct btrfs_scrub_progress *progress, int readonly);
+int btrfs_scrub_pause(struct btrfs_root *root);
+int btrfs_scrub_pause_super(struct btrfs_root *root);
+int btrfs_scrub_continue(struct btrfs_root *root);
+int btrfs_scrub_continue_super(struct btrfs_root *root);
+int btrfs_scrub_cancel(struct btrfs_root *root);
+int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
+int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
+int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+ struct btrfs_scrub_progress *progress);
+
#endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
new file mode 100644
index 000000000000..01e29503a54b
--- /dev/null
+++ b/fs/btrfs/delayed-inode.c
@@ -0,0 +1,1695 @@
+/*
+ * Copyright (C) 2011 Fujitsu. All rights reserved.
+ * Written by Miao Xie <miaox@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include "delayed-inode.h"
+#include "disk-io.h"
+#include "transaction.h"
+
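+/*
+ * Flushing thresholds: background flushing of delayed items starts once
+ * BTRFS_DELAYED_BACKGROUND items have accumulated, and at
+ * BTRFS_DELAYED_WRITEBACK items the task that is adding items also waits
+ * for the backlog to shrink.
+ */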
+#define BTRFS_DELAYED_WRITEBACK 400
+#define BTRFS_DELAYED_BACKGROUND 100
+
+static struct kmem_cache *delayed_node_cache;
+
+int __init btrfs_delayed_inode_init(void)
+{
+ delayed_node_cache = kmem_cache_create("delayed_node",
+ sizeof(struct btrfs_delayed_node),
+ 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ NULL);
+ if (!delayed_node_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void btrfs_delayed_inode_exit(void)
+{
+ if (delayed_node_cache)
+ kmem_cache_destroy(delayed_node_cache);
+}
+
+static inline void btrfs_init_delayed_node(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_root *root, u64 inode_id)
+{
+ delayed_node->root = root;
+ delayed_node->inode_id = inode_id;
+ atomic_set(&delayed_node->refs, 0);
+ delayed_node->count = 0;
+ delayed_node->in_list = 0;
+ delayed_node->inode_dirty = 0;
+ delayed_node->ins_root = RB_ROOT;
+ delayed_node->del_root = RB_ROOT;
+ mutex_init(&delayed_node->mutex);
+ delayed_node->index_cnt = 0;
+ INIT_LIST_HEAD(&delayed_node->n_list);
+ INIT_LIST_HEAD(&delayed_node->p_list);
+ delayed_node->bytes_reserved = 0;
+}
+
+static inline int btrfs_is_continuous_delayed_item(
+ struct btrfs_delayed_item *item1,
+ struct btrfs_delayed_item *item2)
+{
+ if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
+ item1->key.objectid == item2->key.objectid &&
+ item1->key.type == item2->key.type &&
+ item1->key.offset + 1 == item2->key.offset)
+ return 1;
+ return 0;
+}
+
+static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
+ struct btrfs_root *root)
+{
+ return root->fs_info->delayed_root;
+}
+
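+/*
+ * Look up the delayed node cached in the btrfs inode, or create a new one and
+ * insert it into the per-root radix tree. Two references are taken on a new
+ * node: one for the inode cache and one for the caller. If another task races
+ * and caches a node first, the lockless fast path is retried.
+ */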
+static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
+ struct inode *inode)
+{
+ struct btrfs_delayed_node *node;
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ struct btrfs_root *root = btrfs_inode->root;
+ u64 ino = btrfs_ino(inode);
+ int ret;
+
+again:
+ node = ACCESS_ONCE(btrfs_inode->delayed_node);
+ if (node) {
+ atomic_inc(&node->refs); /* can be accessed */
+ return node;
+ }
+
+ spin_lock(&root->inode_lock);
+ node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+ if (node) {
+ if (btrfs_inode->delayed_node) {
+ spin_unlock(&root->inode_lock);
+ goto again;
+ }
+ btrfs_inode->delayed_node = node;
+ atomic_inc(&node->refs); /* can be accessed */
+ atomic_inc(&node->refs); /* cached in the inode */
+ spin_unlock(&root->inode_lock);
+ return node;
+ }
+ spin_unlock(&root->inode_lock);
+
+ node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+ btrfs_init_delayed_node(node, root, ino);
+
+ atomic_inc(&node->refs); /* cached in the btrfs inode */
+ atomic_inc(&node->refs); /* can be accessed */
+
+ ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+ if (ret) {
+ kmem_cache_free(delayed_node_cache, node);
+ return ERR_PTR(ret);
+ }
+
+ spin_lock(&root->inode_lock);
+ ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
+ if (ret == -EEXIST) {
+ kmem_cache_free(delayed_node_cache, node);
+ spin_unlock(&root->inode_lock);
+ radix_tree_preload_end();
+ goto again;
+ }
+ btrfs_inode->delayed_node = node;
+ spin_unlock(&root->inode_lock);
+ radix_tree_preload_end();
+
+ return node;
+}
+
+/*
+ * Call it when holding delayed_node->mutex
+ *
+ * If mod = 1, add this node into the prepared list.
+ */
+static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
+ struct btrfs_delayed_node *node,
+ int mod)
+{
+ spin_lock(&root->lock);
+ if (node->in_list) {
+ if (!list_empty(&node->p_list))
+ list_move_tail(&node->p_list, &root->prepare_list);
+ else if (mod)
+ list_add_tail(&node->p_list, &root->prepare_list);
+ } else {
+ list_add_tail(&node->n_list, &root->node_list);
+ list_add_tail(&node->p_list, &root->prepare_list);
+ atomic_inc(&node->refs); /* inserted into list */
+ root->nodes++;
+ node->in_list = 1;
+ }
+ spin_unlock(&root->lock);
+}
+
+/* Call it when holding delayed_node->mutex */
+static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
+ struct btrfs_delayed_node *node)
+{
+ spin_lock(&root->lock);
+ if (node->in_list) {
+ root->nodes--;
+ atomic_dec(&node->refs); /* not in the list */
+ list_del_init(&node->n_list);
+ if (!list_empty(&node->p_list))
+ list_del_init(&node->p_list);
+ node->in_list = 0;
+ }
+ spin_unlock(&root->lock);
+}
+
+struct btrfs_delayed_node *btrfs_first_delayed_node(
+ struct btrfs_delayed_root *delayed_root)
+{
+ struct list_head *p;
+ struct btrfs_delayed_node *node = NULL;
+
+ spin_lock(&delayed_root->lock);
+ if (list_empty(&delayed_root->node_list))
+ goto out;
+
+ p = delayed_root->node_list.next;
+ node = list_entry(p, struct btrfs_delayed_node, n_list);
+ atomic_inc(&node->refs);
+out:
+ spin_unlock(&delayed_root->lock);
+
+ return node;
+}
+
+struct btrfs_delayed_node *btrfs_next_delayed_node(
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_delayed_root *delayed_root;
+ struct list_head *p;
+ struct btrfs_delayed_node *next = NULL;
+
+ delayed_root = node->root->fs_info->delayed_root;
+ spin_lock(&delayed_root->lock);
+ if (!node->in_list) { /* not in the list */
+ if (list_empty(&delayed_root->node_list))
+ goto out;
+ p = delayed_root->node_list.next;
+ } else if (list_is_last(&node->n_list, &delayed_root->node_list))
+ goto out;
+ else
+ p = node->n_list.next;
+
+ next = list_entry(p, struct btrfs_delayed_node, n_list);
+ atomic_inc(&next->refs);
+out:
+ spin_unlock(&delayed_root->lock);
+
+ return next;
+}
+
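+/*
+ * Drop one reference on a delayed node. If the node still holds items it is
+ * queued (or re-queued) on the delayed root, otherwise it is dequeued. When
+ * the last reference goes away, the node is removed from the radix tree and
+ * freed under root->inode_lock so lookups can't race with the free.
+ */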
+static void __btrfs_release_delayed_node(
+ struct btrfs_delayed_node *delayed_node,
+ int mod)
+{
+ struct btrfs_delayed_root *delayed_root;
+
+ if (!delayed_node)
+ return;
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
+
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->count)
+ btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
+ else
+ btrfs_dequeue_delayed_node(delayed_root, delayed_node);
+ mutex_unlock(&delayed_node->mutex);
+
+ if (atomic_dec_and_test(&delayed_node->refs)) {
+ struct btrfs_root *root = delayed_node->root;
+ spin_lock(&root->inode_lock);
+ if (atomic_read(&delayed_node->refs) == 0) {
+ radix_tree_delete(&root->delayed_nodes_tree,
+ delayed_node->inode_id);
+ kmem_cache_free(delayed_node_cache, delayed_node);
+ }
+ spin_unlock(&root->inode_lock);
+ }
+}
+
+static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
+{
+ __btrfs_release_delayed_node(node, 0);
+}
+
+struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
+ struct btrfs_delayed_root *delayed_root)
+{
+ struct list_head *p;
+ struct btrfs_delayed_node *node = NULL;
+
+ spin_lock(&delayed_root->lock);
+ if (list_empty(&delayed_root->prepare_list))
+ goto out;
+
+ p = delayed_root->prepare_list.next;
+ list_del_init(p);
+ node = list_entry(p, struct btrfs_delayed_node, p_list);
+ atomic_inc(&node->refs);
+out:
+ spin_unlock(&delayed_root->lock);
+
+ return node;
+}
+
+static inline void btrfs_release_prepared_delayed_node(
+ struct btrfs_delayed_node *node)
+{
+ __btrfs_release_delayed_node(node, 1);
+}
+
+struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
+{
+ struct btrfs_delayed_item *item;
+ item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
+ if (item) {
+ item->data_len = data_len;
+ item->ins_or_del = 0;
+ item->bytes_reserved = 0;
+ item->block_rsv = NULL;
+ item->delayed_node = NULL;
+ atomic_set(&item->refs, 1);
+ }
+ return item;
+}
+
+/*
+ * __btrfs_lookup_delayed_item - look up the delayed item by key
+ * @root: the root of the rb-tree to search (the node's ins_root or del_root)
+ * @key: the key to look up
+ * @prev: used to store the prev item if the right item isn't found
+ * @next: used to store the next item if the right item isn't found
+ *
+ * Note: if the right item isn't found, NULL is returned and the prev and
+ * next items are stored in @prev and @next.
+ */
+static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
+ struct rb_root *root,
+ struct btrfs_key *key,
+ struct btrfs_delayed_item **prev,
+ struct btrfs_delayed_item **next)
+{
+ struct rb_node *node, *prev_node = NULL;
+ struct btrfs_delayed_item *delayed_item = NULL;
+ int ret = 0;
+
+ node = root->rb_node;
+
+ while (node) {
+ delayed_item = rb_entry(node, struct btrfs_delayed_item,
+ rb_node);
+ prev_node = node;
+ ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
+ if (ret < 0)
+ node = node->rb_right;
+ else if (ret > 0)
+ node = node->rb_left;
+ else
+ return delayed_item;
+ }
+
+ if (prev) {
+ if (!prev_node)
+ *prev = NULL;
+ else if (ret < 0)
+ *prev = delayed_item;
+ else if ((node = rb_prev(prev_node)) != NULL) {
+ *prev = rb_entry(node, struct btrfs_delayed_item,
+ rb_node);
+ } else
+ *prev = NULL;
+ }
+
+ if (next) {
+ if (!prev_node)
+ *next = NULL;
+ else if (ret > 0)
+ *next = delayed_item;
+ else if ((node = rb_next(prev_node)) != NULL) {
+ *next = rb_entry(node, struct btrfs_delayed_item,
+ rb_node);
+ } else
+ *next = NULL;
+ }
+ return NULL;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+ NULL, NULL);
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+ NULL, NULL);
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item, *next;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+ NULL, &next);
+ if (!item)
+ item = next;
+
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item, *next;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+ NULL, &next);
+ if (!item)
+ item = next;
+
+ return item;
+}
+
+static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
+ struct btrfs_delayed_item *ins,
+ int action)
+{
+ struct rb_node **p, *node;
+ struct rb_node *parent_node = NULL;
+ struct rb_root *root;
+ struct btrfs_delayed_item *item;
+ int cmp;
+
+ if (action == BTRFS_DELAYED_INSERTION_ITEM)
+ root = &delayed_node->ins_root;
+ else if (action == BTRFS_DELAYED_DELETION_ITEM)
+ root = &delayed_node->del_root;
+ else
+ BUG();
+ p = &root->rb_node;
+ node = &ins->rb_node;
+
+ while (*p) {
+ parent_node = *p;
+ item = rb_entry(parent_node, struct btrfs_delayed_item,
+ rb_node);
+
+ cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
+ if (cmp < 0)
+ p = &(*p)->rb_right;
+ else if (cmp > 0)
+ p = &(*p)->rb_left;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(node, parent_node, p);
+ rb_insert_color(node, root);
+ ins->delayed_node = delayed_node;
+ ins->ins_or_del = action;
+
+ if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
+ action == BTRFS_DELAYED_INSERTION_ITEM &&
+ ins->key.offset >= delayed_node->index_cnt)
+ delayed_node->index_cnt = ins->key.offset + 1;
+
+ delayed_node->count++;
+ atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
+ return 0;
+}
+
+static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
+ struct btrfs_delayed_item *item)
+{
+ return __btrfs_add_delayed_item(node, item,
+ BTRFS_DELAYED_INSERTION_ITEM);
+}
+
+static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
+ struct btrfs_delayed_item *item)
+{
+ return __btrfs_add_delayed_item(node, item,
+ BTRFS_DELAYED_DELETION_ITEM);
+}
+
+static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+{
+ struct rb_root *root;
+ struct btrfs_delayed_root *delayed_root;
+
+ delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+
+ BUG_ON(!delayed_root);
+ BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
+ delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
+
+ if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
+ root = &delayed_item->delayed_node->ins_root;
+ else
+ root = &delayed_item->delayed_node->del_root;
+
+ rb_erase(&delayed_item->rb_node, root);
+ delayed_item->delayed_node->count--;
+ atomic_dec(&delayed_root->items);
+ if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
+ waitqueue_active(&delayed_root->wait))
+ wake_up(&delayed_root->wait);
+}
+
+static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
+{
+ if (item) {
+ __btrfs_remove_delayed_item(item);
+ if (atomic_dec_and_test(&item->refs))
+ kfree(item);
+ }
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
+ struct btrfs_delayed_node *delayed_node)
+{
+ struct rb_node *p;
+ struct btrfs_delayed_item *item = NULL;
+
+ p = rb_first(&delayed_node->ins_root);
+ if (p)
+ item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
+ struct btrfs_delayed_node *delayed_node)
+{
+ struct rb_node *p;
+ struct btrfs_delayed_item *item = NULL;
+
+ p = rb_first(&delayed_node->del_root);
+ if (p)
+ item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_next_delayed_item(
+ struct btrfs_delayed_item *item)
+{
+ struct rb_node *p;
+ struct btrfs_delayed_item *next = NULL;
+
+ p = rb_next(&item->rb_node);
+ if (p)
+ next = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+ return next;
+}
+
+static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
+ struct inode *inode)
+{
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ struct btrfs_delayed_node *delayed_node;
+
+ delayed_node = btrfs_inode->delayed_node;
+ if (delayed_node)
+ atomic_inc(&delayed_node->refs);
+
+ return delayed_node;
+}
+
+static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
+ u64 root_id)
+{
+ struct btrfs_key root_key;
+
+ if (root->objectid == root_id)
+ return root;
+
+ root_key.objectid = root_id;
+ root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root_key.offset = (u64)-1;
+ return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
+}
+
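+/*
+ * Reserve metadata space for a delayed item by migrating the space for one
+ * tree operation from the transaction's block reservation into the global
+ * block reservation, where it stays until the item is flushed. Nothing is
+ * done if the transaction reserved no bytes.
+ */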
+static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_delayed_item *item)
+{
+ struct btrfs_block_rsv *src_rsv;
+ struct btrfs_block_rsv *dst_rsv;
+ u64 num_bytes;
+ int ret;
+
+ if (!trans->bytes_reserved)
+ return 0;
+
+ src_rsv = trans->block_rsv;
+ dst_rsv = &root->fs_info->global_block_rsv;
+
+ num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ if (!ret) {
+ item->bytes_reserved = num_bytes;
+ item->block_rsv = dst_rsv;
+ }
+
+ return ret;
+}
+
+static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+ struct btrfs_delayed_item *item)
+{
+ if (!item->bytes_reserved)
+ return;
+
+ btrfs_block_rsv_release(root, item->block_rsv,
+ item->bytes_reserved);
+}
+
+static int btrfs_delayed_inode_reserve_metadata(
+ struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_block_rsv *src_rsv;
+ struct btrfs_block_rsv *dst_rsv;
+ u64 num_bytes;
+ int ret;
+
+ if (!trans->bytes_reserved)
+ return 0;
+
+ src_rsv = trans->block_rsv;
+ dst_rsv = &root->fs_info->global_block_rsv;
+
+ num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ if (!ret)
+ node->bytes_reserved = num_bytes;
+
+ return ret;
+}
+
+static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_block_rsv *rsv;
+
+ if (!node->bytes_reserved)
+ return;
+
+ rsv = &root->fs_info->global_block_rsv;
+ btrfs_block_rsv_release(root, rsv,
+ node->bytes_reserved);
+ node->bytes_reserved = 0;
+}
+
+/*
+ * This helper inserts a run of continuous items into the same leaf, packing
+ * in as many as the leaf's free space allows.
+ */
+static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_item *item)
+{
+ struct btrfs_delayed_item *curr, *next;
+ int free_space;
+ int total_data_size = 0, total_size = 0;
+ struct extent_buffer *leaf;
+ char *data_ptr;
+ struct btrfs_key *keys;
+ u32 *data_size;
+ struct list_head head;
+ int slot;
+ int nitems;
+ int i;
+ int ret = 0;
+
+ BUG_ON(!path->nodes[0]);
+
+ leaf = path->nodes[0];
+ free_space = btrfs_leaf_free_space(root, leaf);
+ INIT_LIST_HEAD(&head);
+
+ next = item;
+ nitems = 0;
+
+ /*
+ * count the number of continuous items that we can insert in a batch
+ */
+ while (total_size + next->data_len + sizeof(struct btrfs_item) <=
+ free_space) {
+ total_data_size += next->data_len;
+ total_size += next->data_len + sizeof(struct btrfs_item);
+ list_add_tail(&next->tree_list, &head);
+ nitems++;
+
+ curr = next;
+ next = __btrfs_next_delayed_item(curr);
+ if (!next)
+ break;
+
+ if (!btrfs_is_continuous_delayed_item(curr, next))
+ break;
+ }
+
+ if (!nitems) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * we need to allocate some memory, which might cause the task to sleep,
+ * so we set all locked nodes in the path to blocking locks first.
+ */
+ btrfs_set_path_blocking(path);
+
+ keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
+ if (!keys) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
+ if (!data_size) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* get keys of all the delayed items */
+ i = 0;
+ list_for_each_entry(next, &head, tree_list) {
+ keys[i] = next->key;
+ data_size[i] = next->data_len;
+ i++;
+ }
+
+ /* reset all the locked nodes in the path to spinning locks. */
+ btrfs_clear_path_blocking(path, NULL);
+
+ /* insert the keys of the items */
+ ret = setup_items_for_insert(trans, root, path, keys, data_size,
+ total_data_size, total_size, nitems);
+ if (ret)
+ goto error;
+
+ /* insert the dir index items */
+ slot = path->slots[0];
+ list_for_each_entry_safe(curr, next, &head, tree_list) {
+ data_ptr = btrfs_item_ptr(leaf, slot, char);
+ write_extent_buffer(leaf, &curr->data,
+ (unsigned long)data_ptr,
+ curr->data_len);
+ slot++;
+
+ btrfs_delayed_item_release_metadata(root, curr);
+
+ list_del(&curr->tree_list);
+ btrfs_release_delayed_item(curr);
+ }
+
+error:
+ kfree(data_size);
+ kfree(keys);
+out:
+ return ret;
+}
+
+/*
+ * This helper handles simple insertions that don't need to extend an item
+ * with new data, such as directory name index and inode item insertions.
+ */
+static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_item *delayed_item)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_item *item;
+ char *ptr;
+ int ret;
+
+ ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
+ delayed_item->data_len);
+ if (ret < 0 && ret != -EEXIST)
+ return ret;
+
+ leaf = path->nodes[0];
+
+ item = btrfs_item_nr(leaf, path->slots[0]);
+ ptr = btrfs_item_ptr(leaf, path->slots[0], char);
+
+ write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
+ delayed_item->data_len);
+ btrfs_mark_buffer_dirty(leaf);
+
+ btrfs_delayed_item_release_metadata(root, delayed_item);
+ return 0;
+}
+
+/*
+ * we insert an item first, then if there are some continuous items, we try
+ * to insert those items into the same leaf.
+ */
+static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_delayed_item *curr, *prev;
+ int ret = 0;
+
+do_again:
+ mutex_lock(&node->mutex);
+ curr = __btrfs_first_delayed_insertion_item(node);
+ if (!curr)
+ goto insert_end;
+
+ ret = btrfs_insert_delayed_item(trans, root, path, curr);
+ if (ret < 0) {
+ btrfs_release_path(path);
+ goto insert_end;
+ }
+
+ prev = curr;
+ curr = __btrfs_next_delayed_item(prev);
+ if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
+ /* insert the continuous items into the same leaf */
+ path->slots[0]++;
+ btrfs_batch_insert_items(trans, root, path, curr);
+ }
+ btrfs_release_delayed_item(prev);
+ btrfs_mark_buffer_dirty(path->nodes[0]);
+
+ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ goto do_again;
+
+insert_end:
+ mutex_unlock(&node->mutex);
+ return ret;
+}
+
+static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_item *item)
+{
+ struct btrfs_delayed_item *curr, *next;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+ struct list_head head;
+ int nitems, i, last_item;
+ int ret = 0;
+
+ BUG_ON(!path->nodes[0]);
+
+ leaf = path->nodes[0];
+
+ i = path->slots[0];
+ last_item = btrfs_header_nritems(leaf) - 1;
+ if (i > last_item)
+ return -ENOENT; /* FIXME: Is errno suitable? */
+
+ next = item;
+ INIT_LIST_HEAD(&head);
+ btrfs_item_key_to_cpu(leaf, &key, i);
+ nitems = 0;
+ /*
+ * count the number of dir index items that we can delete in a batch
+ */
+ while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
+ list_add_tail(&next->tree_list, &head);
+ nitems++;
+
+ curr = next;
+ next = __btrfs_next_delayed_item(curr);
+ if (!next)
+ break;
+
+ if (!btrfs_is_continuous_delayed_item(curr, next))
+ break;
+
+ i++;
+ if (i > last_item)
+ break;
+ btrfs_item_key_to_cpu(leaf, &key, i);
+ }
+
+ if (!nitems)
+ return 0;
+
+ ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
+ if (ret)
+ goto out;
+
+ list_for_each_entry_safe(curr, next, &head, tree_list) {
+ btrfs_delayed_item_release_metadata(root, curr);
+ list_del(&curr->tree_list);
+ btrfs_release_delayed_item(curr);
+ }
+
+out:
+ return ret;
+}
+
+static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_delayed_item *curr, *prev;
+ int ret = 0;
+
+do_again:
+ mutex_lock(&node->mutex);
+ curr = __btrfs_first_delayed_deletion_item(node);
+ if (!curr)
+ goto delete_fail;
+
+ ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
+ if (ret < 0)
+ goto delete_fail;
+ else if (ret > 0) {
+ /*
+ * can't find the item which the node points to, so this node
+ * is invalid, just drop it.
+ */
+ prev = curr;
+ curr = __btrfs_next_delayed_item(prev);
+ btrfs_release_delayed_item(prev);
+ ret = 0;
+ btrfs_release_path(path);
+ if (curr)
+ goto do_again;
+ else
+ goto delete_fail;
+ }
+
+ btrfs_batch_delete_items(trans, root, path, curr);
+ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ goto do_again;
+
+delete_fail:
+ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ return ret;
+}
+
+static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
+{
+ struct btrfs_delayed_root *delayed_root;
+
+ if (delayed_node && delayed_node->inode_dirty) {
+ BUG_ON(!delayed_node->root);
+ delayed_node->inode_dirty = 0;
+ delayed_node->count--;
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
+ atomic_dec(&delayed_root->items);
+ if (atomic_read(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND &&
+ waitqueue_active(&delayed_root->wait))
+ wake_up(&delayed_root->wait);
+ }
+}
+
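+/*
+ * Write the cached inode item of a delayed node back into the inode item in
+ * the fs tree and release the metadata that was reserved for it. Nothing is
+ * done if the delayed inode isn't dirty.
+ */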
+static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_key key;
+ struct btrfs_inode_item *inode_item;
+ struct extent_buffer *leaf;
+ int ret;
+
+ mutex_lock(&node->mutex);
+ if (!node->inode_dirty) {
+ mutex_unlock(&node->mutex);
+ return 0;
+ }
+
+ key.objectid = node->inode_id;
+ btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.offset = 0;
+ ret = btrfs_lookup_inode(trans, root, path, &key, 1);
+ if (ret > 0) {
+ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ return -ENOENT;
+ } else if (ret < 0) {
+ mutex_unlock(&node->mutex);
+ return ret;
+ }
+
+ btrfs_unlock_up_safe(path, 1);
+ leaf = path->nodes[0];
+ inode_item = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_inode_item);
+ write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+ sizeof(struct btrfs_inode_item));
+ btrfs_mark_buffer_dirty(leaf);
+ btrfs_release_path(path);
+
+ btrfs_delayed_inode_release_metadata(root, node);
+ btrfs_release_delayed_inode(node);
+ mutex_unlock(&node->mutex);
+
+ return 0;
+}
+
+/* Called when committing the transaction. */
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_delayed_root *delayed_root;
+ struct btrfs_delayed_node *curr_node, *prev_node;
+ struct btrfs_path *path;
+ int ret = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->leave_spinning = 1;
+
+ delayed_root = btrfs_get_delayed_root(root);
+
+ curr_node = btrfs_first_delayed_node(delayed_root);
+ while (curr_node) {
+ root = curr_node->root;
+ ret = btrfs_insert_delayed_items(trans, path, root,
+ curr_node);
+ if (!ret)
+ ret = btrfs_delete_delayed_items(trans, path, root,
+ curr_node);
+ if (!ret)
+ ret = btrfs_update_delayed_inode(trans, root, path,
+ curr_node);
+ if (ret) {
+ btrfs_release_delayed_node(curr_node);
+ break;
+ }
+
+ prev_node = curr_node;
+ curr_node = btrfs_next_delayed_node(curr_node);
+ btrfs_release_delayed_node(prev_node);
+ }
+
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_path *path;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->leave_spinning = 1;
+
+ ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+ if (!ret)
+ ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+ if (!ret)
+ ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ btrfs_free_path(path);
+
+ return ret;
+}
+
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ int ret;
+
+ if (!delayed_node)
+ return 0;
+
+ mutex_lock(&delayed_node->mutex);
+ if (!delayed_node->count) {
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return 0;
+ }
+ mutex_unlock(&delayed_node->mutex);
+
+ ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+ btrfs_release_delayed_node(delayed_node);
+ return ret;
+}
+
+void btrfs_remove_delayed_node(struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node;
+
+ delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
+ if (!delayed_node)
+ return;
+
+ BTRFS_I(inode)->delayed_node = NULL;
+ btrfs_release_delayed_node(delayed_node);
+}
+
+struct btrfs_async_delayed_node {
+ struct btrfs_root *root;
+ struct btrfs_delayed_node *delayed_node;
+ struct btrfs_work work;
+};
+
+static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
+{
+ struct btrfs_async_delayed_node *async_node;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_path *path;
+ struct btrfs_delayed_node *delayed_node = NULL;
+ struct btrfs_root *root;
+ unsigned long nr = 0;
+ int need_requeue = 0;
+ int ret;
+
+ async_node = container_of(work, struct btrfs_async_delayed_node, work);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ goto out;
+ path->leave_spinning = 1;
+
+ delayed_node = async_node->delayed_node;
+ root = delayed_node->root;
+
+ trans = btrfs_join_transaction(root, 0);
+ if (IS_ERR(trans))
+ goto free_path;
+
+ ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
+ if (!ret)
+ ret = btrfs_delete_delayed_items(trans, path, root,
+ delayed_node);
+
+ if (!ret)
+ btrfs_update_delayed_inode(trans, root, path, delayed_node);
+
+ /*
+ * Maybe new delayed items have been inserted, so we need to requeue
+ * the work. Besides that, we must dequeue the empty delayed nodes
+ * to avoid the race between the delayed items balance and the worker.
+ * The race looks like this:
+ * Task1 Worker thread
+ * count == 0, needn't requeue
+ * also needn't insert the
+ * delayed node into prepare
+ * list again.
+ * add lots of delayed items
+ * queue the delayed node
+ * already in the list,
+ * and not in the prepare
+ * list, it means the delayed
+ * node is being dealt with
+ * by the worker.
+ * do delayed items balance
+ * the delayed node is being
+ * dealt with by the worker
+ * now, just wait.
+ * the worker goto idle.
+ * Task1 will sleep until the transaction is committed.
+ */
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->count)
+ need_requeue = 1;
+ else
+ btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
+ delayed_node);
+ mutex_unlock(&delayed_node->mutex);
+
+ nr = trans->blocks_used;
+
+ btrfs_end_transaction_dmeta(trans, root);
+ __btrfs_btree_balance_dirty(root, nr);
+free_path:
+ btrfs_free_path(path);
+out:
+ if (need_requeue)
+ btrfs_requeue_work(&async_node->work);
+ else {
+ btrfs_release_prepared_delayed_node(delayed_node);
+ kfree(async_node);
+ }
+}
+
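+/*
+ * Hand prepared delayed nodes off to the async delayed workers. If @all is
+ * set, every prepared node is queued, otherwise at most four nodes are
+ * queued per call.
+ */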
+static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
+ struct btrfs_root *root, int all)
+{
+ struct btrfs_async_delayed_node *async_node;
+ struct btrfs_delayed_node *curr;
+ int count = 0;
+
+again:
+ curr = btrfs_first_prepared_delayed_node(delayed_root);
+ if (!curr)
+ return 0;
+
+ async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
+ if (!async_node) {
+ btrfs_release_prepared_delayed_node(curr);
+ return -ENOMEM;
+ }
+
+ async_node->root = root;
+ async_node->delayed_node = curr;
+
+ async_node->work.func = btrfs_async_run_delayed_node_done;
+ async_node->work.flags = 0;
+
+ btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
+ count++;
+
+ if (all || count < 4)
+ goto again;
+
+ return 0;
+}
+
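+/*
+ * Throttle the number of delayed items. Nothing is done below
+ * BTRFS_DELAYED_BACKGROUND items; above that the async workers are kicked,
+ * and once BTRFS_DELAYED_WRITEBACK items pile up, every prepared node is
+ * queued and the caller also waits (up to one second) for the backlog to
+ * fall back below the background threshold.
+ */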
+void btrfs_balance_delayed_items(struct btrfs_root *root)
+{
+ struct btrfs_delayed_root *delayed_root;
+
+ delayed_root = btrfs_get_delayed_root(root);
+
+ if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+ return;
+
+ if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
+ int ret;
+ ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
+ if (ret)
+ return;
+
+ wait_event_interruptible_timeout(
+ delayed_root->wait,
+ (atomic_read(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND),
+ HZ);
+ return;
+ }
+
+ btrfs_wq_run_delayed_node(delayed_root, root, 0);
+}
+
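+/*
+ * Record a new directory index item in the delayed node of @dir instead of
+ * inserting it into the btree right away; it is written out later when the
+ * delayed items are run.
+ */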
+int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, const char *name,
+ int name_len, struct inode *dir,
+ struct btrfs_disk_key *disk_key, u8 type,
+ u64 index)
+{
+ struct btrfs_delayed_node *delayed_node;
+ struct btrfs_delayed_item *delayed_item;
+ struct btrfs_dir_item *dir_item;
+ int ret;
+
+ delayed_node = btrfs_get_or_create_delayed_node(dir);
+ if (IS_ERR(delayed_node))
+ return PTR_ERR(delayed_node);
+
+ delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
+ if (!delayed_item) {
+ ret = -ENOMEM;
+ goto release_node;
+ }
+
+ ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+ /*
+ * we reserved enough space when we started the transaction, so a
+ * metadata reservation failure here is impossible
+ */
+ BUG_ON(ret);
+
+ delayed_item->key.objectid = btrfs_ino(dir);
+ btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
+ delayed_item->key.offset = index;
+
+ dir_item = (struct btrfs_dir_item *)delayed_item->data;
+ dir_item->location = *disk_key;
+ dir_item->transid = cpu_to_le64(trans->transid);
+ dir_item->data_len = 0;
+ dir_item->name_len = cpu_to_le16(name_len);
+ dir_item->type = type;
+ memcpy((char *)(dir_item + 1), name, name_len);
+
+ mutex_lock(&delayed_node->mutex);
+ ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "err add delayed dir index item(name: %s) into "
+ "the insertion tree of the delayed node"
+ "(root id: %llu, inode id: %llu, errno: %d)\n",
+ name,
+ (unsigned long long)delayed_node->root->objectid,
+ (unsigned long long)delayed_node->inode_id,
+ ret);
+ BUG();
+ }
+ mutex_unlock(&delayed_node->mutex);
+
+release_node:
+ btrfs_release_delayed_node(delayed_node);
+ return ret;
+}
+
+static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+ struct btrfs_delayed_node *node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item;
+
+ mutex_lock(&node->mutex);
+ item = __btrfs_lookup_delayed_insertion_item(node, key);
+ if (!item) {
+ mutex_unlock(&node->mutex);
+ return 1;
+ }
+
+ btrfs_delayed_item_release_metadata(root, item);
+ btrfs_release_delayed_item(item);
+ mutex_unlock(&node->mutex);
+ return 0;
+}
+
+int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *dir,
+ u64 index)
+{
+ struct btrfs_delayed_node *node;
+ struct btrfs_delayed_item *item;
+ struct btrfs_key item_key;
+ int ret;
+
+ node = btrfs_get_or_create_delayed_node(dir);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ item_key.objectid = btrfs_ino(dir);
+ btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
+ item_key.offset = index;
+
+ ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+ if (!ret)
+ goto end;
+
+ item = btrfs_alloc_delayed_item(0);
+ if (!item) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ item->key = item_key;
+
+ ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+ /*
+ * we reserved enough space when we started the transaction, so a
+ * metadata reservation failure here is impossible.
+ */
+ BUG_ON(ret);
+
+ mutex_lock(&node->mutex);
+ ret = __btrfs_add_delayed_deletion_item(node, item);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "err add delayed dir index item(index: %llu) "
+ "into the deletion tree of the delayed node"
+ "(root id: %llu, inode id: %llu, errno: %d)\n",
+ (unsigned long long)index,
+ (unsigned long long)node->root->objectid,
+ (unsigned long long)node->inode_id,
+ ret);
+ BUG();
+ }
+ mutex_unlock(&node->mutex);
+end:
+ btrfs_release_delayed_node(node);
+ return ret;
+}
+
+int btrfs_inode_delayed_dir_index_count(struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
+ int ret = 0;
+
+ if (!delayed_node)
+ return -ENOENT;
+
+ /*
+ * Since we hold i_mutex of this directory, no new directory index can
+ * be added to the delayed node and index_cnt can't be updated now,
+ * so we needn't lock the delayed node.
+ */
+ if (!delayed_node->index_cnt)
+ return -EINVAL;
+
+ BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
+ return ret;
+}
+
+void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
+ struct list_head *del_list)
+{
+ struct btrfs_delayed_node *delayed_node;
+ struct btrfs_delayed_item *item;
+
+ delayed_node = btrfs_get_delayed_node(inode);
+ if (!delayed_node)
+ return;
+
+ mutex_lock(&delayed_node->mutex);
+ item = __btrfs_first_delayed_insertion_item(delayed_node);
+ while (item) {
+ atomic_inc(&item->refs);
+ list_add_tail(&item->readdir_list, ins_list);
+ item = __btrfs_next_delayed_item(item);
+ }
+
+ item = __btrfs_first_delayed_deletion_item(delayed_node);
+ while (item) {
+ atomic_inc(&item->refs);
+ list_add_tail(&item->readdir_list, del_list);
+ item = __btrfs_next_delayed_item(item);
+ }
+ mutex_unlock(&delayed_node->mutex);
+ /*
+ * This delayed node is still cached in the btrfs inode, so refs
+ * must be > 1 here, and we needn't check whether it is about to be
+ * freed.
+ *
+ * Besides that, this function is only used for readdir, and no
+ * delayed items are inserted or deleted during that period, so we
+ * needn't requeue or dequeue this delayed node either.
+ */
+ atomic_dec(&delayed_node->refs);
+}
+
+void btrfs_put_delayed_items(struct list_head *ins_list,
+ struct list_head *del_list)
+{
+ struct btrfs_delayed_item *curr, *next;
+
+ list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+ list_del(&curr->readdir_list);
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+ }
+
+ list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+ list_del(&curr->readdir_list);
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+ }
+}
+
+int btrfs_should_delete_dir_index(struct list_head *del_list,
+ u64 index)
+{
+ struct btrfs_delayed_item *curr, *next;
+ int ret;
+
+ if (list_empty(del_list))
+ return 0;
+
+ list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+ if (curr->key.offset > index)
+ break;
+
+ list_del(&curr->readdir_list);
+ ret = (curr->key.offset == index);
+
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+
+ if (ret)
+ return 1;
+ else
+ continue;
+ }
+ return 0;
+}
+
+/*
+ * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
+ *
+ */
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+ filldir_t filldir,
+ struct list_head *ins_list)
+{
+ struct btrfs_dir_item *di;
+ struct btrfs_delayed_item *curr, *next;
+ struct btrfs_key location;
+ char *name;
+ int name_len;
+ int over = 0;
+ unsigned char d_type;
+
+ if (list_empty(ins_list))
+ return 0;
+
+ /*
+ * The data of the delayed items can't change, so we needn't lock
+ * them. And since we hold i_mutex of the directory, nobody can
+ * delete any directory index now.
+ */
+ list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+ list_del(&curr->readdir_list);
+
+ if (curr->key.offset < filp->f_pos) {
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+ continue;
+ }
+
+ filp->f_pos = curr->key.offset;
+
+ di = (struct btrfs_dir_item *)curr->data;
+ name = (char *)(di + 1);
+ name_len = le16_to_cpu(di->name_len);
+
+ d_type = btrfs_filetype_table[di->type];
+ btrfs_disk_key_to_cpu(&location, &di->location);
+
+ over = filldir(dirent, name, name_len, curr->key.offset,
+ location.objectid, d_type);
+
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+
+ if (over)
+ return 1;
+ }
+ return 0;
+}
+
+BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
+ generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
+ sequence, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
+ transid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
+ nbytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
+ block_group, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
+
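+/*
+ * Copy the in-memory inode fields (size, mode, link count, timestamps, etc.)
+ * into a stack btrfs_inode_item so the delayed node can write it into the
+ * fs tree later without touching the inode again.
+ */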
+static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item *inode_item,
+ struct inode *inode)
+{
+ btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
+ btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
+ btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
+ btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
+ btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
+ btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
+ btrfs_set_stack_inode_generation(inode_item,
+ BTRFS_I(inode)->generation);
+ btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
+ btrfs_set_stack_inode_transid(inode_item, trans->transid);
+ btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
+ btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
+ btrfs_set_stack_inode_block_group(inode_item,
+ BTRFS_I(inode)->block_group);
+
+ btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
+ inode->i_atime.tv_sec);
+ btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
+ inode->i_atime.tv_nsec);
+
+ btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
+ inode->i_mtime.tv_sec);
+ btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
+ inode->i_mtime.tv_nsec);
+
+ btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
+ inode->i_ctime.tv_sec);
+ btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
+ inode->i_ctime.tv_nsec);
+}
+
+int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node;
+ int ret;
+
+ delayed_node = btrfs_get_or_create_delayed_node(inode);
+ if (IS_ERR(delayed_node))
+ return PTR_ERR(delayed_node);
+
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->inode_dirty) {
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+ goto release_node;
+ }
+
+ ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
+ /*
+ * we reserved enough space when we started the transaction, so a
+ * metadata reservation failure here is impossible
+ */
+ BUG_ON(ret);
+
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+ delayed_node->inode_dirty = 1;
+ delayed_node->count++;
+ atomic_inc(&root->fs_info->delayed_root->items);
+release_node:
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return ret;
+}
+
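+/*
+ * Throw away every pending insertion and deletion item of a delayed node,
+ * release the metadata space reserved for them, and drop the cached dirty
+ * inode item if there is one.
+ */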
+static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
+{
+ struct btrfs_root *root = delayed_node->root;
+ struct btrfs_delayed_item *curr_item, *prev_item;
+
+ mutex_lock(&delayed_node->mutex);
+ curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
+ while (curr_item) {
+ btrfs_delayed_item_release_metadata(root, curr_item);
+ prev_item = curr_item;
+ curr_item = __btrfs_next_delayed_item(prev_item);
+ btrfs_release_delayed_item(prev_item);
+ }
+
+ curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
+ while (curr_item) {
+ btrfs_delayed_item_release_metadata(root, curr_item);
+ prev_item = curr_item;
+ curr_item = __btrfs_next_delayed_item(prev_item);
+ btrfs_release_delayed_item(prev_item);
+ }
+
+ if (delayed_node->inode_dirty) {
+ btrfs_delayed_inode_release_metadata(root, delayed_node);
+ btrfs_release_delayed_inode(delayed_node);
+ }
+ mutex_unlock(&delayed_node->mutex);
+}
+
+void btrfs_kill_delayed_inode_items(struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node;
+
+ delayed_node = btrfs_get_delayed_node(inode);
+ if (!delayed_node)
+ return;
+
+ __btrfs_kill_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node);
+}
+
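+/*
+ * Drop every delayed node of a dead root. Nodes are grabbed from the radix
+ * tree in batches of up to eight, with an extra reference held while each
+ * one is killed and released.
+ */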
+void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
+{
+ u64 inode_id = 0;
+ struct btrfs_delayed_node *delayed_nodes[8];
+ int i, n;
+
+ while (1) {
+ spin_lock(&root->inode_lock);
+ n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
+ (void **)delayed_nodes, inode_id,
+ ARRAY_SIZE(delayed_nodes));
+ if (!n) {
+ spin_unlock(&root->inode_lock);
+ break;
+ }
+
+ inode_id = delayed_nodes[n - 1]->inode_id + 1;
+
+ for (i = 0; i < n; i++)
+ atomic_inc(&delayed_nodes[i]->refs);
+ spin_unlock(&root->inode_lock);
+
+ for (i = 0; i < n; i++) {
+ __btrfs_kill_delayed_node(delayed_nodes[i]);
+ btrfs_release_delayed_node(delayed_nodes[i]);
+ }
+ }
+}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
new file mode 100644
index 000000000000..eb7d240aa648
--- /dev/null
+++ b/fs/btrfs/delayed-inode.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2011 Fujitsu. All rights reserved.
+ * Written by Miao Xie <miaox@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __DELAYED_TREE_OPERATION_H
+#define __DELAYED_TREE_OPERATION_H
+
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+#include "ctree.h"
+
+/* types of the delayed item */
+#define BTRFS_DELAYED_INSERTION_ITEM 1
+#define BTRFS_DELAYED_DELETION_ITEM 2
+
+struct btrfs_delayed_root {
+ spinlock_t lock;
+ struct list_head node_list;
+ /*
+ * Used for delayed nodes which are waiting to be dealt with by the
+ * worker. If the delayed node is inserted into the work queue, we
+ * drop it from this list.
+ */
+ struct list_head prepare_list;
+ atomic_t items; /* for delayed items */
+ int nodes; /* for delayed nodes */
+ wait_queue_head_t wait;
+};
+
+struct btrfs_delayed_node {
+ u64 inode_id;
+ u64 bytes_reserved;
+ struct btrfs_root *root;
+ /* Used to add the node into the delayed root's node list. */
+ struct list_head n_list;
+ /*
+ * Used to add the node to the prepare list; the nodes in this list
+ * are waiting to be dealt with by the async worker.
+ */
+ struct list_head p_list;
+ struct rb_root ins_root;
+ struct rb_root del_root;
+ struct mutex mutex;
+ struct btrfs_inode_item inode_item;
+ atomic_t refs;
+ u64 index_cnt;
+ bool in_list;
+ bool inode_dirty;
+ int count;
+};
+
+struct btrfs_delayed_item {
+ struct rb_node rb_node;
+ struct btrfs_key key;
+ struct list_head tree_list; /* used for batch insert/delete items */
+ struct list_head readdir_list; /* used for readdir items */
+ u64 bytes_reserved;
+ struct btrfs_block_rsv *block_rsv;
+ struct btrfs_delayed_node *delayed_node;
+ atomic_t refs;
+ int ins_or_del;
+ u32 data_len;
+ char data[0];
+};
+
+static inline void btrfs_init_delayed_root(
+ struct btrfs_delayed_root *delayed_root)
+{
+ atomic_set(&delayed_root->items, 0);
+ delayed_root->nodes = 0;
+ spin_lock_init(&delayed_root->lock);
+ init_waitqueue_head(&delayed_root->wait);
+ INIT_LIST_HEAD(&delayed_root->node_list);
+ INIT_LIST_HEAD(&delayed_root->prepare_list);
+}
+
+int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, const char *name,
+ int name_len, struct inode *dir,
+ struct btrfs_disk_key *disk_key, u8 type,
+ u64 index);
+
+int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *dir,
+ u64 index);
+
+int btrfs_inode_delayed_dir_index_count(struct inode *inode);
+
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
+
+void btrfs_balance_delayed_items(struct btrfs_root *root);
+
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct inode *inode);
+/* Used for evicting the inode. */
+void btrfs_remove_delayed_node(struct inode *inode);
+void btrfs_kill_delayed_inode_items(struct inode *inode);
+
+
+int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode);
+
+/* Used for dropping a dead root */
+void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
+
+/* Used for readdir() */
+void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
+ struct list_head *del_list);
+void btrfs_put_delayed_items(struct list_head *ins_list,
+ struct list_head *del_list);
+int btrfs_should_delete_dir_index(struct list_head *del_list,
+ u64 index);
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+ filldir_t filldir,
+ struct list_head *ins_list);
+
+/* for init */
+int __init btrfs_delayed_inode_init(void);
+void btrfs_delayed_inode_exit(void);
+#endif
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index bce28f653899..125cf76fcd08 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -281,44 +281,6 @@ again:
}
/*
- * This checks to see if there are any delayed refs in the
- * btree for a given bytenr. It returns one if it finds any
- * and zero otherwise.
- *
- * If it only finds a head node, it returns 0.
- *
- * The idea is to use this when deciding if you can safely delete an
- * extent from the extent allocation tree. There may be a pending
- * ref in the rbtree that adds or removes references, so as long as this
- * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
- * allocation tree.
- */
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
-{
- struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_ref_root *delayed_refs;
- struct rb_node *prev_node;
- int ret = 0;
-
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
-
- ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
- if (ref) {
- prev_node = rb_prev(&ref->rb_node);
- if (!prev_node)
- goto out;
- ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
- rb_node);
- if (ref->bytenr == bytenr)
- ret = 1;
- }
-out:
- spin_unlock(&delayed_refs->lock);
- return ret;
-}
-
-/*
* helper function to update an extent delayed ref in the
* rbtree. existing and update must both have the same
* bytenr and parent
@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
return btrfs_delayed_node_to_head(ref);
return NULL;
}
-
-/*
- * add a delayed ref to the tree. This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- *
- * The main point of this call is to add and remove a backreference in a single
- * shot, taking the lock only once, and only searching for the head node once.
- *
- * It is the same as doing a ref add and delete in two separate calls.
- */
-#if 0
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes, u64 orig_parent,
- u64 parent, u64 orig_ref_root, u64 ref_root,
- u64 orig_ref_generation, u64 ref_generation,
- u64 owner_objectid, int pin)
-{
- struct btrfs_delayed_ref *ref;
- struct btrfs_delayed_ref *old_ref;
- struct btrfs_delayed_ref_head *head_ref;
- struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
-
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
- if (!ref)
- return -ENOMEM;
-
- old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
- if (!old_ref) {
- kfree(ref);
- return -ENOMEM;
- }
-
- /*
- * the parent = 0 case comes from cases where we don't actually
- * know the parent yet. It will get updated later via a add/drop
- * pair.
- */
- if (parent == 0)
- parent = bytenr;
- if (orig_parent == 0)
- orig_parent = bytenr;
-
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
- if (!head_ref) {
- kfree(ref);
- kfree(old_ref);
- return -ENOMEM;
- }
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
-
- /*
- * insert both the head node and the new ref without dropping
- * the spin lock
- */
- ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
- (u64)-1, 0, 0, 0,
- BTRFS_UPDATE_DELAYED_HEAD, 0);
- BUG_ON(ret);
-
- ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
- parent, ref_root, ref_generation,
- owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
- BUG_ON(ret);
-
- ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
- orig_parent, orig_ref_root,
- orig_ref_generation, owner_objectid,
- BTRFS_DROP_DELAYED_REF, pin);
- BUG_ON(ret);
- spin_unlock(&delayed_refs->lock);
- return 0;
-}
-#endif
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 50e3cf92fbda..e287e3b0eab0 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -166,12 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes, u64 orig_parent,
- u64 parent, u64 orig_ref_root, u64 ref_root,
- u64 orig_ref_generation, u64 ref_generation,
- u64 owner_objectid, int pin);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head);
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index c62f02f6ae69..685f2593c4f0 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -50,7 +50,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
if (di)
return ERR_PTR(-EEXIST);
ret = btrfs_extend_item(trans, root, path, data_size);
- WARN_ON(ret > 0);
}
if (ret < 0)
return ERR_PTR(ret);
@@ -124,8 +123,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
* to use for the second index (if one is created).
*/
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, const char *name, int name_len, u64 dir,
- struct btrfs_key *location, u8 type, u64 index)
+ *root, const char *name, int name_len,
+ struct inode *dir, struct btrfs_key *location,
+ u8 type, u64 index)
{
int ret = 0;
int ret2 = 0;
@@ -137,13 +137,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
struct btrfs_disk_key disk_key;
u32 data_size;
- key.objectid = dir;
+ key.objectid = btrfs_ino(dir);
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
key.offset = btrfs_name_hash(name, name_len);
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
+ btrfs_cpu_key_to_disk(&disk_key, location);
+
data_size = sizeof(*dir_item) + name_len;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
name, name_len);
@@ -155,7 +159,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
}
leaf = path->nodes[0];
- btrfs_cpu_key_to_disk(&disk_key, location);
btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
btrfs_set_dir_type(leaf, dir_item, type);
btrfs_set_dir_data_len(leaf, dir_item, 0);
@@ -172,29 +175,11 @@ second_insert:
ret = 0;
goto out_free;
}
- btrfs_release_path(root, path);
-
- btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
- key.offset = index;
- dir_item = insert_with_overflow(trans, root, path, &key, data_size,
- name, name_len);
- if (IS_ERR(dir_item)) {
- ret2 = PTR_ERR(dir_item);
- goto out_free;
- }
- leaf = path->nodes[0];
- btrfs_cpu_key_to_disk(&disk_key, location);
- btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
- btrfs_set_dir_type(leaf, dir_item, type);
- btrfs_set_dir_data_len(leaf, dir_item, 0);
- btrfs_set_dir_name_len(leaf, dir_item, name_len);
- btrfs_set_dir_transid(leaf, dir_item, trans->transid);
- name_ptr = (unsigned long)(dir_item + 1);
- write_extent_buffer(leaf, name, name_ptr, name_len);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_release_path(path);
+ ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
+ &disk_key, type, index);
out_free:
-
btrfs_free_path(path);
if (ret)
return ret;
@@ -452,7 +437,7 @@ int verify_dir_item(struct btrfs_root *root,
namelen = XATTR_NAME_MAX;
if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
- printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n",
+ printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n",
(unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 228cf36ece83..98b6a71decba 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -29,6 +29,7 @@
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
@@ -41,6 +42,7 @@
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
+#include "inode-map.h"
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
@@ -137,7 +139,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
* that covers the entire device
*/
static struct extent_map *btree_get_extent(struct inode *inode,
- struct page *page, size_t page_offset, u64 start, u64 len,
+ struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -154,7 +156,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
}
read_unlock(&em_tree->lock);
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
@@ -254,14 +256,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
memcpy(&found, result, csum_size);
read_extent_buffer(buf, &val, 0, csum_size);
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs: %s checksum verify "
+ printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
"failed on %llu wanted %X found %X "
"level %d\n",
root->fs_info->sb->s_id,
(unsigned long long)buf->start, val, found,
btrfs_header_level(buf));
- }
if (result != (char *)&inline_result)
kfree(result);
return 1;
@@ -296,13 +296,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
ret = 0;
goto out;
}
- if (printk_ratelimit()) {
- printk("parent transid verify failed on %llu wanted %llu "
+ printk_ratelimited("parent transid verify failed on %llu wanted %llu "
"found %llu\n",
(unsigned long long)eb->start,
(unsigned long long)parent_transid,
(unsigned long long)btrfs_header_generation(eb));
- }
ret = 1;
clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
@@ -380,7 +378,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
len = page->private >> 2;
WARN_ON(len == 0);
- eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+ eb = alloc_extent_buffer(tree, start, len, page);
if (eb == NULL) {
WARN_ON(1);
goto out;
@@ -525,7 +523,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
len = page->private >> 2;
WARN_ON(len == 0);
- eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+ eb = alloc_extent_buffer(tree, start, len, page);
if (eb == NULL) {
ret = -EIO;
goto out;
@@ -533,12 +531,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs bad tree block start "
+ printk_ratelimited(KERN_INFO "btrfs bad tree block start "
"%llu %llu\n",
(unsigned long long)found_start,
(unsigned long long)eb->start);
- }
ret = -EIO;
goto err;
}
@@ -550,10 +546,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
goto err;
}
if (check_tree_block_fsid(root, eb)) {
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs bad fsid on block %llu\n",
+ printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
(unsigned long long)eb->start);
- }
ret = -EIO;
goto err;
}
@@ -650,12 +644,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
return 256 * limit;
}
-int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
-{
- return atomic_read(&info->nr_async_bios) >
- btrfs_async_submit_limit(info);
-}
-
static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async;
@@ -963,7 +951,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
struct inode *btree_inode = root->fs_info->btree_inode;
struct extent_buffer *eb;
eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize, GFP_NOFS);
+ bytenr, blocksize);
return eb;
}
@@ -974,7 +962,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
struct extent_buffer *eb;
eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize, NULL, GFP_NOFS);
+ bytenr, blocksize, NULL);
return eb;
}
@@ -1058,13 +1046,13 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->name = NULL;
root->in_sysfs = 0;
root->inode_tree = RB_ROOT;
+ INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
root->block_rsv = NULL;
root->orphan_block_rsv = NULL;
INIT_LIST_HEAD(&root->dirty_list);
INIT_LIST_HEAD(&root->orphan_list);
INIT_LIST_HEAD(&root->root_list);
- spin_lock_init(&root->node_lock);
spin_lock_init(&root->orphan_lock);
spin_lock_init(&root->inode_lock);
spin_lock_init(&root->accounting_lock);
@@ -1080,7 +1068,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->log_transid = 0;
root->last_log_commit = 0;
extent_io_tree_init(&root->dirty_log_pages,
- fs_info->btree_inode->i_mapping, GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
@@ -1283,21 +1271,6 @@ out:
return root;
}
-struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_objectid)
-{
- struct btrfs_root *root;
-
- if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
- return fs_info->tree_root;
- if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
- return fs_info->extent_root;
-
- root = radix_tree_lookup(&fs_info->fs_roots_radix,
- (unsigned long)root_objectid);
- return root;
-}
-
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location)
{
@@ -1326,6 +1299,19 @@ again:
if (IS_ERR(root))
return root;
+ root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
+ if (!root->free_ino_ctl)
+ goto fail;
+ root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
+ GFP_NOFS);
+ if (!root->free_ino_pinned)
+ goto fail;
+
+ btrfs_init_free_ino_ctl(root);
+ mutex_init(&root->fs_commit_mutex);
+ spin_lock_init(&root->cache_lock);
+ init_waitqueue_head(&root->cache_wait);
+
set_anon_super(&root->anon_super, NULL);
if (btrfs_root_refs(&root->root_item) == 0) {
@@ -1369,41 +1355,6 @@ fail:
return ERR_PTR(ret);
}
-struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
- struct btrfs_key *location,
- const char *name, int namelen)
-{
- return btrfs_read_fs_root_no_name(fs_info, location);
-#if 0
- struct btrfs_root *root;
- int ret;
-
- root = btrfs_read_fs_root_no_name(fs_info, location);
- if (!root)
- return NULL;
-
- if (root->in_sysfs)
- return root;
-
- ret = btrfs_set_root_name(root, name, namelen);
- if (ret) {
- free_extent_buffer(root->node);
- kfree(root);
- return ERR_PTR(ret);
- }
-
- ret = btrfs_sysfs_add_root(root);
- if (ret) {
- free_extent_buffer(root->node);
- kfree(root->name);
- kfree(root);
- return ERR_PTR(ret);
- }
- root->in_sysfs = 1;
- return root;
-#endif
-}
-
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
@@ -1411,7 +1362,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
struct btrfs_device *device;
struct backing_dev_info *bdi;
- list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
if (!device->bdev)
continue;
bdi = blk_get_backing_dev_info(device->bdev);
@@ -1420,6 +1372,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
break;
}
}
+ rcu_read_unlock();
return ret;
}
@@ -1522,6 +1475,7 @@ static int cleaner_kthread(void *arg)
btrfs_run_delayed_iputs(root);
btrfs_clean_old_snapshots(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
+ btrfs_run_defrag_inodes(root->fs_info);
}
if (freezing(current)) {
@@ -1611,7 +1565,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *tree_root = btrfs_sb(sb);
- struct btrfs_fs_info *fs_info = tree_root->fs_info;
+ struct btrfs_fs_info *fs_info = NULL;
struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
@@ -1623,11 +1577,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
struct btrfs_super_block *disk_super;
- if (!extent_root || !tree_root || !fs_info ||
+ if (!extent_root || !tree_root || !tree_root->fs_info ||
!chunk_root || !dev_root || !csum_root) {
err = -ENOMEM;
goto fail;
}
+ fs_info = tree_root->fs_info;
ret = init_srcu_struct(&fs_info->subvol_srcu);
if (ret) {
@@ -1662,6 +1617,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->ref_cache_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->delayed_iput_lock);
+ spin_lock_init(&fs_info->defrag_inodes_lock);
init_completion(&fs_info->kobj_unregister);
fs_info->tree_root = tree_root;
@@ -1684,15 +1640,35 @@ struct btrfs_root *open_ctree(struct super_block *sb,
atomic_set(&fs_info->async_delalloc_pages, 0);
atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
+ atomic_set(&fs_info->defrag_running, 0);
fs_info->sb = sb;
fs_info->max_inline = 8192 * 1024;
fs_info->metadata_ratio = 0;
+ fs_info->defrag_inodes = RB_ROOT;
fs_info->thread_pool_size = min_t(unsigned long,
num_online_cpus() + 2, 8);
INIT_LIST_HEAD(&fs_info->ordered_extents);
spin_lock_init(&fs_info->ordered_extent_lock);
+ fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
+ GFP_NOFS);
+ if (!fs_info->delayed_root) {
+ err = -ENOMEM;
+ goto fail_iput;
+ }
+ btrfs_init_delayed_root(fs_info->delayed_root);
+
+ mutex_init(&fs_info->scrub_lock);
+ atomic_set(&fs_info->scrubs_running, 0);
+ atomic_set(&fs_info->scrub_pause_req, 0);
+ atomic_set(&fs_info->scrubs_paused, 0);
+ atomic_set(&fs_info->scrub_cancel_req, 0);
+ init_waitqueue_head(&fs_info->scrub_pause_wait);
+ init_rwsem(&fs_info->scrub_super_lock);
+ fs_info->scrub_workers_refcnt = 0;
+ btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+ fs_info->thread_pool_size, &fs_info->generic_worker);
sb->s_blocksize = 4096;
sb->s_blocksize_bits = blksize_bits(4096);
@@ -1711,10 +1687,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
- fs_info->btree_inode->i_mapping,
- GFP_NOFS);
- extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
- GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
+ extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
@@ -1728,9 +1702,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->block_group_cache_tree = RB_ROOT;
extent_io_tree_init(&fs_info->freed_extents[0],
- fs_info->btree_inode->i_mapping, GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
extent_io_tree_init(&fs_info->freed_extents[1],
- fs_info->btree_inode->i_mapping, GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
fs_info->pinned_extents = &fs_info->freed_extents[0];
fs_info->do_barriers = 1;
@@ -1760,7 +1734,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
bh = btrfs_read_dev_super(fs_devices->latest_bdev);
if (!bh) {
err = -EINVAL;
- goto fail_iput;
+ goto fail_alloc;
}
memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
@@ -1772,7 +1746,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
disk_super = &fs_info->super_copy;
if (!btrfs_super_root(disk_super))
- goto fail_iput;
+ goto fail_alloc;
/* check FS state, whether FS is broken. */
fs_info->fs_state |= btrfs_super_flags(disk_super);
@@ -1788,7 +1762,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
ret = btrfs_parse_options(tree_root, options);
if (ret) {
err = ret;
- goto fail_iput;
+ goto fail_alloc;
}
features = btrfs_super_incompat_flags(disk_super) &
@@ -1798,7 +1772,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
"unsupported optional features (%Lx).\n",
(unsigned long long)features);
err = -EINVAL;
- goto fail_iput;
+ goto fail_alloc;
}
features = btrfs_super_incompat_flags(disk_super);
@@ -1814,7 +1788,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
"unsupported option features (%Lx).\n",
(unsigned long long)features);
err = -EINVAL;
- goto fail_iput;
+ goto fail_alloc;
}
btrfs_init_workers(&fs_info->generic_worker,
@@ -1861,6 +1835,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
&fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
1, &fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
/*
* endios are largely parallel and should have a very
@@ -1882,6 +1859,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
btrfs_start_workers(&fs_info->endio_write_workers, 1);
btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
+ btrfs_start_workers(&fs_info->delayed_workers, 1);
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2138,6 +2116,9 @@ fail_sb_buffer:
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
+ btrfs_stop_workers(&fs_info->delayed_workers);
+fail_alloc:
+ kfree(fs_info->delayed_root);
fail_iput:
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
iput(fs_info->btree_inode);
@@ -2165,11 +2146,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (printk_ratelimit()) {
- printk(KERN_WARNING "lost page write due to "
+ printk_ratelimited(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
bdevname(bh->b_bdev, b));
- }
/* note, we don't set_buffer_write_io_error because we have
* our own ways of dealing with the IO errors
*/
@@ -2333,7 +2312,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
head = &root->fs_info->fs_devices->devices;
- list_for_each_entry(dev, head, dev_list) {
+ list_for_each_entry_rcu(dev, head, dev_list) {
if (!dev->bdev) {
total_errors++;
continue;
@@ -2366,7 +2345,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
}
total_errors = 0;
- list_for_each_entry(dev, head, dev_list) {
+ list_for_each_entry_rcu(dev, head, dev_list) {
if (!dev->bdev)
continue;
if (!dev->in_fs_metadata || !dev->writeable)
@@ -2404,12 +2383,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
if (btrfs_root_refs(&root->root_item) == 0)
synchronize_srcu(&fs_info->subvol_srcu);
+ __btrfs_remove_free_space_cache(root->free_ino_pinned);
+ __btrfs_remove_free_space_cache(root->free_ino_ctl);
free_fs_root(root);
return 0;
}
static void free_fs_root(struct btrfs_root *root)
{
+ iput(root->cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
if (root->anon_super.s_dev) {
down_write(&root->anon_super.s_umount);
@@ -2417,6 +2399,8 @@ static void free_fs_root(struct btrfs_root *root)
}
free_extent_buffer(root->node);
free_extent_buffer(root->commit_root);
+ kfree(root->free_ino_ctl);
+ kfree(root->free_ino_pinned);
kfree(root->name);
kfree(root);
}
@@ -2520,6 +2504,15 @@ int close_ctree(struct btrfs_root *root)
fs_info->closing = 1;
smp_mb();
+ btrfs_scrub_cancel(root);
+
+ /* wait for any defraggers to finish */
+ wait_event(fs_info->transaction_wait,
+ (atomic_read(&fs_info->defrag_running) == 0));
+
+ /* clear out the rbtree of defraggable inodes */
+ btrfs_run_defrag_inodes(root->fs_info);
+
btrfs_put_block_group_cache(fs_info);
/*
@@ -2578,6 +2571,7 @@ int close_ctree(struct btrfs_root *root)
del_fs_roots(fs_info);
iput(fs_info->btree_inode);
+ kfree(fs_info->delayed_root);
btrfs_stop_workers(&fs_info->generic_worker);
btrfs_stop_workers(&fs_info->fixup_workers);
@@ -2589,6 +2583,7 @@ int close_ctree(struct btrfs_root *root)
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
+ btrfs_stop_workers(&fs_info->delayed_workers);
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -2665,6 +2660,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
if (current->flags & PF_MEMALLOC)
return;
+ btrfs_balance_delayed_items(root);
+
+ num_dirty = root->fs_info->dirty_metadata_bytes;
+
+ if (num_dirty > thresh) {
+ balance_dirty_pages_ratelimited_nr(
+ root->fs_info->btree_inode->i_mapping, 1);
+ }
+ return;
+}
+
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+{
+ /*
+ * looks as though older kernels can get into trouble with
+ * this code, they end up stuck in balance_dirty_pages forever
+ */
+ u64 num_dirty;
+ unsigned long thresh = 32 * 1024 * 1024;
+
+ if (current->flags & PF_MEMALLOC)
+ return;
+
num_dirty = root->fs_info->dirty_metadata_bytes;
if (num_dirty > thresh) {
@@ -2697,7 +2715,7 @@ int btree_lock_page_hook(struct page *page)
goto out;
len = page->private >> 2;
- eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
+ eb = find_extent_buffer(io_tree, bytenr, len);
if (!eb)
goto out;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 07b20dc2fd95..a0b610a67aae 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -55,35 +55,20 @@ int btrfs_commit_super(struct btrfs_root *root);
int btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
-struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_objectid);
-struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
- struct btrfs_key *location,
- const char *name, int namelen);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
struct btrfs_key *location);
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location);
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
-int btrfs_insert_dev_radix(struct btrfs_root *root,
- struct block_device *bdev,
- u64 device_id,
- u64 block_start,
- u64 num_blocks);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
-void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
-int wait_on_tree_block_writeback(struct btrfs_root *root,
- struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
-int btrfs_open_device(struct btrfs_device *dev);
-int btrfs_verify_block_csum(struct btrfs_root *root,
- struct extent_buffer *buf);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
@@ -91,8 +76,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
unsigned long bio_flags, u64 bio_offset,
extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_done);
-
-int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf);
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index b4ffad859adb..1b8dc33778f9 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
len = BTRFS_FID_SIZE_NON_CONNECTABLE;
type = FILEID_BTRFS_WITHOUT_PARENT;
- fid->objectid = inode->i_ino;
+ fid->objectid = btrfs_ino(inode);
fid->root_objectid = BTRFS_I(inode)->root->objectid;
fid->gen = inode->i_generation;
@@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
if (!path)
return ERR_PTR(-ENOMEM);
- if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
} else {
- key.objectid = dir->i_ino;
+ key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
}
@@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
struct btrfs_key key;
int name_len;
int ret;
+ u64 ino;
if (!dir || !inode)
return -EINVAL;
@@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name,
if (!S_ISDIR(dir->i_mode))
return -EINVAL;
+ ino = btrfs_ino(inode);
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
- if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ if (ino == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = BTRFS_I(inode)->root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
} else {
- key.objectid = inode->i_ino;
- key.offset = dir->i_ino;
+ key.objectid = ino;
+ key.offset = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
}
@@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
btrfs_free_path(path);
return ret;
} else if (ret > 0) {
- if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ if (ino == BTRFS_FIRST_FREE_OBJECTID) {
path->slots[0]--;
} else {
btrfs_free_path(path);
@@ -281,11 +284,11 @@ static int btrfs_get_name(struct dentry *parent, char *name,
}
leaf = path->nodes[0];
- if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
- rref = btrfs_item_ptr(leaf, path->slots[0],
+ if (ino == BTRFS_FIRST_FREE_OBJECTID) {
+ rref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
- name_ptr = (unsigned long)(rref + 1);
- name_len = btrfs_root_ref_name_len(leaf, rref);
+ name_ptr = (unsigned long)(rref + 1);
+ name_len = btrfs_root_ref_name_len(leaf, rref);
} else {
iref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_ref);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9ee6bd55e16c..169bd62ce776 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -94,7 +94,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
return (cache->flags & bits) == bits;
}
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
atomic_inc(&cache->count);
}
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
WARN_ON(cache->pinned > 0);
WARN_ON(cache->reserved > 0);
WARN_ON(cache->reserved_pinned > 0);
+ kfree(cache->free_space_ctl);
kfree(cache);
}
}
@@ -379,7 +380,7 @@ again:
break;
caching_ctl->progress = last;
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
up_read(&fs_info->extent_commit_sem);
mutex_unlock(&caching_ctl->mutex);
if (btrfs_transaction_in_commit(fs_info))
@@ -754,8 +755,12 @@ again:
atomic_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
- btrfs_release_path(root->fs_info->extent_root, path);
+ btrfs_release_path(path);
+ /*
+ * Mutex was contended, block until it's released and try
+ * again
+ */
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
@@ -934,7 +939,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
break;
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (owner < BTRFS_FIRST_FREE_OBJECTID)
new_size += sizeof(*bi);
@@ -947,7 +952,6 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
BUG_ON(ret);
ret = btrfs_extend_item(trans, root, path, new_size);
- BUG_ON(ret);
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1042,7 +1046,7 @@ again:
return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
key.type = BTRFS_EXTENT_REF_V0_KEY;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
@@ -1080,7 +1084,7 @@ again:
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset)) {
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
err = 0;
@@ -1141,7 +1145,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset))
break;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.offset++;
ret = btrfs_insert_empty_item(trans, root, path, &key,
size);
@@ -1167,7 +1171,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
ret = 0;
fail:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
@@ -1293,7 +1297,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (ret == -ENOENT && parent) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.type = BTRFS_EXTENT_REF_V0_KEY;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
@@ -1322,7 +1326,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
}
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
@@ -1555,7 +1559,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
size = btrfs_extent_inline_ref_size(type);
ret = btrfs_extend_item(trans, root, path, size);
- BUG_ON(ret);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
@@ -1608,7 +1611,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
if (ret != -ENOENT)
return ret;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
*ref_ret = NULL;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
@@ -1684,7 +1687,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
end - ptr - size);
item_size -= size;
ret = btrfs_truncate_item(trans, root, path, item_size, 1);
- BUG_ON(ret);
}
btrfs_mark_buffer_dirty(leaf);
return 0;
@@ -1862,7 +1864,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
__run_delayed_extent_op(extent_op, leaf, item);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root->fs_info->extent_root, path);
+ btrfs_release_path(path);
path->reada = 1;
path->leave_spinning = 1;
@@ -2297,6 +2299,10 @@ again:
atomic_inc(&ref->refs);
spin_unlock(&delayed_refs->lock);
+ /*
+ * Mutex was contended, block until it's
+ * released and try again
+ */
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
@@ -2361,8 +2367,12 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
atomic_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
- btrfs_release_path(root->fs_info->extent_root, path);
+ btrfs_release_path(path);
+ /*
+ * Mutex was contended, block until it's released and let
+ * caller try again
+ */
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
@@ -2510,126 +2520,6 @@ out:
return ret;
}
-#if 0
-int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, u32 nr_extents)
-{
- struct btrfs_key key;
- struct btrfs_file_extent_item *fi;
- u64 root_gen;
- u32 nritems;
- int i;
- int level;
- int ret = 0;
- int shared = 0;
-
- if (!root->ref_cows)
- return 0;
-
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
- shared = 0;
- root_gen = root->root_key.offset;
- } else {
- shared = 1;
- root_gen = trans->transid - 1;
- }
-
- level = btrfs_header_level(buf);
- nritems = btrfs_header_nritems(buf);
-
- if (level == 0) {
- struct btrfs_leaf_ref *ref;
- struct btrfs_extent_info *info;
-
- ref = btrfs_alloc_leaf_ref(root, nr_extents);
- if (!ref) {
- ret = -ENOMEM;
- goto out;
- }
-
- ref->root_gen = root_gen;
- ref->bytenr = buf->start;
- ref->owner = btrfs_header_owner(buf);
- ref->generation = btrfs_header_generation(buf);
- ref->nritems = nr_extents;
- info = ref->extents;
-
- for (i = 0; nr_extents > 0 && i < nritems; i++) {
- u64 disk_bytenr;
- btrfs_item_key_to_cpu(buf, &key, i);
- if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
- continue;
- fi = btrfs_item_ptr(buf, i,
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(buf, fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- continue;
- disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
- if (disk_bytenr == 0)
- continue;
-
- info->bytenr = disk_bytenr;
- info->num_bytes =
- btrfs_file_extent_disk_num_bytes(buf, fi);
- info->objectid = key.objectid;
- info->offset = key.offset;
- info++;
- }
-
- ret = btrfs_add_leaf_ref(root, ref, shared);
- if (ret == -EEXIST && shared) {
- struct btrfs_leaf_ref *old;
- old = btrfs_lookup_leaf_ref(root, ref->bytenr);
- BUG_ON(!old);
- btrfs_remove_leaf_ref(root, old);
- btrfs_free_leaf_ref(root, old);
- ret = btrfs_add_leaf_ref(root, ref, shared);
- }
- WARN_ON(ret);
- btrfs_free_leaf_ref(root, ref);
- }
-out:
- return ret;
-}
-
-/* when a block goes through cow, we update the reference counts of
- * everything that block points to. The internal pointers of the block
- * can be in just about any order, and it is likely to have clusters of
- * things that are close together and clusters of things that are not.
- *
- * To help reduce the seeks that come with updating all of these reference
- * counts, sort them by byte number before actual updates are done.
- *
- * struct refsort is used to match byte number to slot in the btree block.
- * we sort based on the byte number and then use the slot to actually
- * find the item.
- *
- * struct refsort is smaller than struct btrfs_item and smaller than
- * struct btrfs_key_ptr. Since we're currently limited to the page size
- * for a btree block, there's no way for a kmalloc of refsorts for a
- * single node to be bigger than a page.
- */
-struct refsort {
- u64 bytenr;
- u32 slot;
-};
-
-/*
- * for passing into sort()
- */
-static int refsort_cmp(const void *a_void, const void *b_void)
-{
- const struct refsort *a = a_void;
- const struct refsort *b = b_void;
-
- if (a->bytenr < b->bytenr)
- return -1;
- if (a->bytenr > b->bytenr)
- return 1;
- return 0;
-}
-#endif
-
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
@@ -2732,7 +2622,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
fail:
if (ret)
return ret;
@@ -2785,7 +2675,7 @@ again:
inode = lookup_free_space_inode(root, block_group, path);
if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
ret = PTR_ERR(inode);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto out;
}
@@ -2854,7 +2744,7 @@ again:
out_put:
iput(inode);
out_free:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
out:
spin_lock(&block_group->lock);
block_group->disk_cache_state = dcs;
@@ -3144,7 +3034,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
/* make sure bytes are sectorsize aligned */
bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
- if (root == root->fs_info->tree_root) {
+ if (root == root->fs_info->tree_root ||
+ BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
alloc_chunk = 0;
committed = 1;
}
@@ -3211,18 +3102,6 @@ commit_trans:
goto again;
}
-#if 0 /* I hope we never need this code again, just in case */
- printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
- "%llu bytes_reserved, " "%llu bytes_pinned, "
- "%llu bytes_readonly, %llu may use %llu total\n",
- (unsigned long long)bytes,
- (unsigned long long)data_sinfo->bytes_used,
- (unsigned long long)data_sinfo->bytes_reserved,
- (unsigned long long)data_sinfo->bytes_pinned,
- (unsigned long long)data_sinfo->bytes_readonly,
- (unsigned long long)data_sinfo->bytes_may_use,
- (unsigned long long)data_sinfo->total_bytes);
-#endif
return -ENOSPC;
}
data_sinfo->bytes_may_use += bytes;
@@ -3425,6 +3304,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
if (reserved == 0)
return 0;
+ /* nothing to shrink - nothing to reclaim */
+ if (root->fs_info->delalloc_bytes == 0)
+ return 0;
+
max_reclaim = min(reserved, to_reclaim);
while (loops < 1024) {
@@ -3651,8 +3534,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
spin_unlock(&block_rsv->lock);
}
-void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
- struct btrfs_block_rsv *dest, u64 num_bytes)
+static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
+ struct btrfs_block_rsv *dest, u64 num_bytes)
{
struct btrfs_space_info *space_info = block_rsv->space_info;
@@ -3855,23 +3738,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
u64 meta_used;
u64 data_used;
int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
-#if 0
- /*
- * per tree used space accounting can be inaccuracy, so we
- * can't rely on it.
- */
- spin_lock(&fs_info->extent_root->accounting_lock);
- num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
- spin_unlock(&fs_info->extent_root->accounting_lock);
-
- spin_lock(&fs_info->csum_root->accounting_lock);
- num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
- spin_unlock(&fs_info->csum_root->accounting_lock);
- spin_lock(&fs_info->tree_root->accounting_lock);
- num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
- spin_unlock(&fs_info->tree_root->accounting_lock);
-#endif
sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
spin_lock(&sinfo->lock);
data_used = sinfo->bytes_used;
@@ -3924,10 +3791,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
block_rsv->reserved = block_rsv->size;
block_rsv->full = 1;
}
-#if 0
- printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
- block_rsv->size, block_rsv->reserved);
-#endif
+
spin_unlock(&sinfo->lock);
spin_unlock(&block_rsv->lock);
}
@@ -3973,12 +3837,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
}
-static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
-{
- return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
- 3 * num_items;
-}
-
int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int num_items)
@@ -3989,7 +3847,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
if (num_items == 0 || root->fs_info->chunk_root == root)
return 0;
- num_bytes = calc_trans_metadata_size(root, num_items);
+ num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
num_bytes);
if (!ret) {
@@ -4028,14 +3886,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
* If all of the metadata space is used, we can commit
* transaction and use space it freed.
*/
- u64 num_bytes = calc_trans_metadata_size(root, 4);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
void btrfs_orphan_release_metadata(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 num_bytes = calc_trans_metadata_size(root, 4);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
@@ -4049,7 +3907,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
* two for root back/forward refs, two for directory entries
* and one for root of the snapshot.
*/
- u64 num_bytes = calc_trans_metadata_size(root, 5);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
dst_rsv->space_info = src_rsv->space_info;
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
@@ -4078,7 +3936,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (nr_extents > reserved_extents) {
nr_extents -= reserved_extents;
- to_reserve = calc_trans_metadata_size(root, nr_extents);
+ to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
} else {
nr_extents = 0;
to_reserve = 0;
@@ -4132,7 +3990,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
to_free = calc_csum_metadata_size(inode, num_bytes);
if (nr_extents > 0)
- to_free += calc_trans_metadata_size(root, nr_extents);
+ to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
@@ -4541,7 +4399,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
NULL, refs_to_drop,
is_data);
BUG_ON(ret);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
path->leave_spinning = 1;
key.objectid = bytenr;
@@ -4580,7 +4438,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
owner_objectid, 0);
BUG_ON(ret < 0);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
path->leave_spinning = 1;
key.objectid = bytenr;
@@ -4650,7 +4508,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
BUG_ON(ret);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
if (is_data) {
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
@@ -4893,7 +4751,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
return 0;
wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
- (cache->free_space >= num_bytes));
+ (cache->free_space_ctl->free_space >= num_bytes));
put_caching_control(caching_ctl);
return 0;
@@ -6480,7 +6338,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
trans->block_rsv = block_rsv;
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
BUG_ON(err);
ret = btrfs_del_root(trans, tree_root, &root->root_key);
@@ -6584,1514 +6442,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
return ret;
}
-#if 0
-static unsigned long calc_ra(unsigned long start, unsigned long last,
- unsigned long nr)
-{
- return min(last, start + nr - 1);
-}
-
-static noinline int relocate_inode_pages(struct inode *inode, u64 start,
- u64 len)
-{
- u64 page_start;
- u64 page_end;
- unsigned long first_index;
- unsigned long last_index;
- unsigned long i;
- struct page *page;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct file_ra_state *ra;
- struct btrfs_ordered_extent *ordered;
- unsigned int total_read = 0;
- unsigned int total_dirty = 0;
- int ret = 0;
-
- ra = kzalloc(sizeof(*ra), GFP_NOFS);
- if (!ra)
- return -ENOMEM;
-
- mutex_lock(&inode->i_mutex);
- first_index = start >> PAGE_CACHE_SHIFT;
- last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
-
- /* make sure the dirty trick played by the caller work */
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- first_index, last_index);
- if (ret)
- goto out_unlock;
-
- file_ra_state_init(ra, inode->i_mapping);
-
- for (i = first_index ; i <= last_index; i++) {
- if (total_read % ra->ra_pages == 0) {
- btrfs_force_ra(inode->i_mapping, ra, NULL, i,
- calc_ra(i, last_index, ra->ra_pages));
- }
- total_read++;
-again:
- if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
- BUG_ON(1);
- page = grab_cache_page(inode->i_mapping, i);
- if (!page) {
- ret = -ENOMEM;
- goto out_unlock;
- }
- if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- page_cache_release(page);
- ret = -EIO;
- goto out_unlock;
- }
- }
- wait_on_page_writeback(page);
-
- page_start = (u64)page->index << PAGE_CACHE_SHIFT;
- page_end = page_start + PAGE_CACHE_SIZE - 1;
- lock_extent(io_tree, page_start, page_end, GFP_NOFS);
-
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
- if (ordered) {
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
- unlock_page(page);
- page_cache_release(page);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- goto again;
- }
- set_page_extent_mapped(page);
-
- if (i == first_index)
- set_extent_bits(io_tree, page_start, page_end,
- EXTENT_BOUNDARY, GFP_NOFS);
- btrfs_set_extent_delalloc(inode, page_start, page_end);
-
- set_page_dirty(page);
- total_dirty++;
-
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
- unlock_page(page);
- page_cache_release(page);
- }
-
-out_unlock:
- kfree(ra);
- mutex_unlock(&inode->i_mutex);
- balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
- return ret;
-}
-
-static noinline int relocate_data_extent(struct inode *reloc_inode,
- struct btrfs_key *extent_key,
- u64 offset)
-{
- struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
- struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
- struct extent_map *em;
- u64 start = extent_key->objectid - offset;
- u64 end = start + extent_key->offset - 1;
-
- em = alloc_extent_map(GFP_NOFS);
- BUG_ON(!em);
-
- em->start = start;
- em->len = extent_key->offset;
- em->block_len = extent_key->offset;
- em->block_start = extent_key->objectid;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
- set_bit(EXTENT_FLAG_PINNED, &em->flags);
-
- /* setup extent map to cheat btrfs_readpage */
- lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
- while (1) {
- int ret;
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
- if (ret != -EEXIST) {
- free_extent_map(em);
- break;
- }
- btrfs_drop_extent_cache(reloc_inode, start, end, 0);
- }
- unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
-
- return relocate_inode_pages(reloc_inode, start, extent_key->offset);
-}
-
-struct btrfs_ref_path {
- u64 extent_start;
- u64 nodes[BTRFS_MAX_LEVEL];
- u64 root_objectid;
- u64 root_generation;
- u64 owner_objectid;
- u32 num_refs;
- int lowest_level;
- int current_level;
- int shared_level;
-
- struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
- u64 new_nodes[BTRFS_MAX_LEVEL];
-};
-
-struct disk_extent {
- u64 ram_bytes;
- u64 disk_bytenr;
- u64 disk_num_bytes;
- u64 offset;
- u64 num_bytes;
- u8 compression;
- u8 encryption;
- u16 other_encoding;
-};
-
-static int is_cowonly_root(u64 root_objectid)
-{
- if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
- root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
- root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
- root_objectid == BTRFS_DEV_TREE_OBJECTID ||
- root_objectid == BTRFS_TREE_LOG_OBJECTID ||
- root_objectid == BTRFS_CSUM_TREE_OBJECTID)
- return 1;
- return 0;
-}
-
-static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_ref_path *ref_path,
- int first_time)
-{
- struct extent_buffer *leaf;
- struct btrfs_path *path;
- struct btrfs_extent_ref *ref;
- struct btrfs_key key;
- struct btrfs_key found_key;
- u64 bytenr;
- u32 nritems;
- int level;
- int ret = 1;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- if (first_time) {
- ref_path->lowest_level = -1;
- ref_path->current_level = -1;
- ref_path->shared_level = -1;
- goto walk_up;
- }
-walk_down:
- level = ref_path->current_level - 1;
- while (level >= -1) {
- u64 parent;
- if (level < ref_path->lowest_level)
- break;
-
- if (level >= 0)
- bytenr = ref_path->nodes[level];
- else
- bytenr = ref_path->extent_start;
- BUG_ON(bytenr == 0);
-
- parent = ref_path->nodes[level + 1];
- ref_path->nodes[level + 1] = 0;
- ref_path->current_level = level;
- BUG_ON(parent == 0);
-
- key.objectid = bytenr;
- key.offset = parent + 1;
- key.type = BTRFS_EXTENT_REF_KEY;
-
- ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
- BUG_ON(ret == 0);
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(extent_root, path);
- if (ret < 0)
- goto out;
- if (ret > 0)
- goto next;
- leaf = path->nodes[0];
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid == bytenr &&
- found_key.type == BTRFS_EXTENT_REF_KEY) {
- if (level < ref_path->shared_level)
- ref_path->shared_level = level;
- goto found;
- }
-next:
- level--;
- btrfs_release_path(extent_root, path);
- cond_resched();
- }
- /* reached lowest level */
- ret = 1;
- goto out;
-walk_up:
- level = ref_path->current_level;
- while (level < BTRFS_MAX_LEVEL - 1) {
- u64 ref_objectid;
-
- if (level >= 0)
- bytenr = ref_path->nodes[level];
- else
- bytenr = ref_path->extent_start;
-
- BUG_ON(bytenr == 0);
-
- key.objectid = bytenr;
- key.offset = 0;
- key.type = BTRFS_EXTENT_REF_KEY;
-
- ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(extent_root, path);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- /* the extent was freed by someone */
- if (ref_path->lowest_level == level)
- goto out;
- btrfs_release_path(extent_root, path);
- goto walk_down;
- }
- leaf = path->nodes[0];
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid != bytenr ||
- found_key.type != BTRFS_EXTENT_REF_KEY) {
- /* the extent was freed by someone */
- if (ref_path->lowest_level == level) {
- ret = 1;
- goto out;
- }
- btrfs_release_path(extent_root, path);
- goto walk_down;
- }
-found:
- ref = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref);
- ref_objectid = btrfs_ref_objectid(leaf, ref);
- if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
- if (first_time) {
- level = (int)ref_objectid;
- BUG_ON(level >= BTRFS_MAX_LEVEL);
- ref_path->lowest_level = level;
- ref_path->current_level = level;
- ref_path->nodes[level] = bytenr;
- } else {
- WARN_ON(ref_objectid != level);
- }
- } else {
- WARN_ON(level != -1);
- }
- first_time = 0;
-
- if (ref_path->lowest_level == level) {
- ref_path->owner_objectid = ref_objectid;
- ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
- }
-
- /*
- * the block is tree root or the block isn't in reference
- * counted tree.
- */
- if (found_key.objectid == found_key.offset ||
- is_cowonly_root(btrfs_ref_root(leaf, ref))) {
- ref_path->root_objectid = btrfs_ref_root(leaf, ref);
- ref_path->root_generation =
- btrfs_ref_generation(leaf, ref);
- if (level < 0) {
- /* special reference from the tree log */
- ref_path->nodes[0] = found_key.offset;
- ref_path->current_level = 0;
- }
- ret = 0;
- goto out;
- }
-
- level++;
- BUG_ON(ref_path->nodes[level] != 0);
- ref_path->nodes[level] = found_key.offset;
- ref_path->current_level = level;
-
- /*
- * the reference was created in the running transaction,
- * no need to continue walking up.
- */
- if (btrfs_ref_generation(leaf, ref) == trans->transid) {
- ref_path->root_objectid = btrfs_ref_root(leaf, ref);
- ref_path->root_generation =
- btrfs_ref_generation(leaf, ref);
- ret = 0;
- goto out;
- }
-
- btrfs_release_path(extent_root, path);
- cond_resched();
- }
- /* reached max tree level, but no tree root found. */
- BUG();
-out:
- btrfs_free_path(path);
- return ret;
-}
-
-static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_ref_path *ref_path,
- u64 extent_start)
-{
- memset(ref_path, 0, sizeof(*ref_path));
- ref_path->extent_start = extent_start;
-
- return __next_ref_path(trans, extent_root, ref_path, 1);
-}
-
-static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_ref_path *ref_path)
-{
- return __next_ref_path(trans, extent_root, ref_path, 0);
-}
-
-static noinline int get_new_locations(struct inode *reloc_inode,
- struct btrfs_key *extent_key,
- u64 offset, int no_fragment,
- struct disk_extent **extents,
- int *nr_extents)
-{
- struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
- struct btrfs_path *path;
- struct btrfs_file_extent_item *fi;
- struct extent_buffer *leaf;
- struct disk_extent *exts = *extents;
- struct btrfs_key found_key;
- u64 cur_pos;
- u64 last_byte;
- u32 nritems;
- int nr = 0;
- int max = *nr_extents;
- int ret;
-
- WARN_ON(!no_fragment && *extents);
- if (!exts) {
- max = 1;
- exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
- if (!exts)
- return -ENOMEM;
- }
-
- path = btrfs_alloc_path();
- if (!path) {
- if (exts != *extents)
- kfree(exts);
- return -ENOMEM;
- }
-
- cur_pos = extent_key->objectid - offset;
- last_byte = extent_key->objectid + extent_key->offset;
- ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
- cur_pos, 0);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
-
- while (1) {
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- if (ret > 0)
- break;
- leaf = path->nodes[0];
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.offset != cur_pos ||
- found_key.type != BTRFS_EXTENT_DATA_KEY ||
- found_key.objectid != reloc_inode->i_ino)
- break;
-
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) !=
- BTRFS_FILE_EXTENT_REG ||
- btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
- break;
-
- if (nr == max) {
- struct disk_extent *old = exts;
- max *= 2;
- exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
- if (!exts) {
- ret = -ENOMEM;
- goto out;
- }
- memcpy(exts, old, sizeof(*exts) * nr);
- if (old != *extents)
- kfree(old);
- }
-
- exts[nr].disk_bytenr =
- btrfs_file_extent_disk_bytenr(leaf, fi);
- exts[nr].disk_num_bytes =
- btrfs_file_extent_disk_num_bytes(leaf, fi);
- exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
- exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
- exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
- exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
- exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
- exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
- fi);
- BUG_ON(exts[nr].offset > 0);
- BUG_ON(exts[nr].compression || exts[nr].encryption);
- BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
-
- cur_pos += exts[nr].num_bytes;
- nr++;
-
- if (cur_pos + offset >= last_byte)
- break;
-
- if (no_fragment) {
- ret = 1;
- goto out;
- }
- path->slots[0]++;
- }
-
- BUG_ON(cur_pos + offset > last_byte);
- if (cur_pos + offset < last_byte) {
- ret = -ENOENT;
- goto out;
- }
- ret = 0;
-out:
- btrfs_free_path(path);
- if (ret) {
- if (exts != *extents)
- kfree(exts);
- } else {
- *extents = exts;
- *nr_extents = nr;
- }
- return ret;
-}
-
-static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *extent_key,
- struct btrfs_key *leaf_key,
- struct btrfs_ref_path *ref_path,
- struct disk_extent *new_extents,
- int nr_extents)
-{
- struct extent_buffer *leaf;
- struct btrfs_file_extent_item *fi;
- struct inode *inode = NULL;
- struct btrfs_key key;
- u64 lock_start = 0;
- u64 lock_end = 0;
- u64 num_bytes;
- u64 ext_offset;
- u64 search_end = (u64)-1;
- u32 nritems;
- int nr_scaned = 0;
- int extent_locked = 0;
- int extent_type;
- int ret;
-
- memcpy(&key, leaf_key, sizeof(key));
- if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
- if (key.objectid < ref_path->owner_objectid ||
- (key.objectid == ref_path->owner_objectid &&
- key.type < BTRFS_EXTENT_DATA_KEY)) {
- key.objectid = ref_path->owner_objectid;
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = 0;
- }
- }
-
- while (1) {
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- if (ret < 0)
- goto out;
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
-next:
- if (extent_locked && ret > 0) {
- /*
- * the file extent item was modified by someone
- * before the extent got locked.
- */
- unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- extent_locked = 0;
- }
-
- if (path->slots[0] >= nritems) {
- if (++nr_scaned > 2)
- break;
-
- BUG_ON(extent_locked);
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- if (ret > 0)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- }
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-
- if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
- if ((key.objectid > ref_path->owner_objectid) ||
- (key.objectid == ref_path->owner_objectid &&
- key.type > BTRFS_EXTENT_DATA_KEY) ||
- key.offset >= search_end)
- break;
- }
-
- if (inode && key.objectid != inode->i_ino) {
- BUG_ON(extent_locked);
- btrfs_release_path(root, path);
- mutex_unlock(&inode->i_mutex);
- iput(inode);
- inode = NULL;
- continue;
- }
-
- if (key.type != BTRFS_EXTENT_DATA_KEY) {
- path->slots[0]++;
- ret = 1;
- goto next;
- }
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- extent_type = btrfs_file_extent_type(leaf, fi);
- if ((extent_type != BTRFS_FILE_EXTENT_REG &&
- extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
- (btrfs_file_extent_disk_bytenr(leaf, fi) !=
- extent_key->objectid)) {
- path->slots[0]++;
- ret = 1;
- goto next;
- }
-
- num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
- ext_offset = btrfs_file_extent_offset(leaf, fi);
-
- if (search_end == (u64)-1) {
- search_end = key.offset - ext_offset +
- btrfs_file_extent_ram_bytes(leaf, fi);
- }
-
- if (!extent_locked) {
- lock_start = key.offset;
- lock_end = lock_start + num_bytes - 1;
- } else {
- if (lock_start > key.offset ||
- lock_end + 1 < key.offset + num_bytes) {
- unlock_extent(&BTRFS_I(inode)->io_tree,
- lock_start, lock_end, GFP_NOFS);
- extent_locked = 0;
- }
- }
-
- if (!inode) {
- btrfs_release_path(root, path);
-
- inode = btrfs_iget_locked(root->fs_info->sb,
- key.objectid, root);
- if (inode->i_state & I_NEW) {
- BTRFS_I(inode)->root = root;
- BTRFS_I(inode)->location.objectid =
- key.objectid;
- BTRFS_I(inode)->location.type =
- BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.offset = 0;
- btrfs_read_locked_inode(inode);
- unlock_new_inode(inode);
- }
- /*
- * some code call btrfs_commit_transaction while
- * holding the i_mutex, so we can't use mutex_lock
- * here.
- */
- if (is_bad_inode(inode) ||
- !mutex_trylock(&inode->i_mutex)) {
- iput(inode);
- inode = NULL;
- key.offset = (u64)-1;
- goto skip;
- }
- }
-
- if (!extent_locked) {
- struct btrfs_ordered_extent *ordered;
-
- btrfs_release_path(root, path);
-
- lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- ordered = btrfs_lookup_first_ordered_extent(inode,
- lock_end);
- if (ordered &&
- ordered->file_offset <= lock_end &&
- ordered->file_offset + ordered->len > lock_start) {
- unlock_extent(&BTRFS_I(inode)->io_tree,
- lock_start, lock_end, GFP_NOFS);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- key.offset += num_bytes;
- goto skip;
- }
- if (ordered)
- btrfs_put_ordered_extent(ordered);
-
- extent_locked = 1;
- continue;
- }
-
- if (nr_extents == 1) {
- /* update extent pointer in place */
- btrfs_set_file_extent_disk_bytenr(leaf, fi,
- new_extents[0].disk_bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi,
- new_extents[0].disk_num_bytes);
- btrfs_mark_buffer_dirty(leaf);
-
- btrfs_drop_extent_cache(inode, key.offset,
- key.offset + num_bytes - 1, 0);
-
- ret = btrfs_inc_extent_ref(trans, root,
- new_extents[0].disk_bytenr,
- new_extents[0].disk_num_bytes,
- leaf->start,
- root->root_key.objectid,
- trans->transid,
- key.objectid);
- BUG_ON(ret);
-
- ret = btrfs_free_extent(trans, root,
- extent_key->objectid,
- extent_key->offset,
- leaf->start,
- btrfs_header_owner(leaf),
- btrfs_header_generation(leaf),
- key.objectid, 0);
- BUG_ON(ret);
-
- btrfs_release_path(root, path);
- key.offset += num_bytes;
- } else {
- BUG_ON(1);
-#if 0
- u64 alloc_hint;
- u64 extent_len;
- int i;
-			/*
-			 * drop the old extent pointer first, then insert the
-			 * new pointers one by one
-			 */
- btrfs_release_path(root, path);
- ret = btrfs_drop_extents(trans, root, inode, key.offset,
- key.offset + num_bytes,
- key.offset, &alloc_hint);
- BUG_ON(ret);
-
- for (i = 0; i < nr_extents; i++) {
- if (ext_offset >= new_extents[i].num_bytes) {
- ext_offset -= new_extents[i].num_bytes;
- continue;
- }
- extent_len = min(new_extents[i].num_bytes -
- ext_offset, num_bytes);
-
- ret = btrfs_insert_empty_item(trans, root,
- path, &key,
- sizeof(*fi));
- BUG_ON(ret);
-
- leaf = path->nodes[0];
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi,
- trans->transid);
- btrfs_set_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_disk_bytenr(leaf, fi,
- new_extents[i].disk_bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi,
- new_extents[i].disk_num_bytes);
- btrfs_set_file_extent_ram_bytes(leaf, fi,
- new_extents[i].ram_bytes);
-
- btrfs_set_file_extent_compression(leaf, fi,
- new_extents[i].compression);
- btrfs_set_file_extent_encryption(leaf, fi,
- new_extents[i].encryption);
- btrfs_set_file_extent_other_encoding(leaf, fi,
- new_extents[i].other_encoding);
-
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_len);
- ext_offset += new_extents[i].offset;
- btrfs_set_file_extent_offset(leaf, fi,
- ext_offset);
- btrfs_mark_buffer_dirty(leaf);
-
- btrfs_drop_extent_cache(inode, key.offset,
- key.offset + extent_len - 1, 0);
-
- ret = btrfs_inc_extent_ref(trans, root,
- new_extents[i].disk_bytenr,
- new_extents[i].disk_num_bytes,
- leaf->start,
- root->root_key.objectid,
- trans->transid, key.objectid);
- BUG_ON(ret);
- btrfs_release_path(root, path);
-
- inode_add_bytes(inode, extent_len);
-
- ext_offset = 0;
- num_bytes -= extent_len;
- key.offset += extent_len;
-
- if (num_bytes == 0)
- break;
- }
- BUG_ON(i >= nr_extents);
-#endif
- }
-
- if (extent_locked) {
- unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- extent_locked = 0;
- }
-skip:
- if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
- key.offset >= search_end)
- break;
-
- cond_resched();
- }
- ret = 0;
-out:
- btrfs_release_path(root, path);
- if (inode) {
- mutex_unlock(&inode->i_mutex);
- if (extent_locked) {
- unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- }
- iput(inode);
- }
- return ret;
-}
-
-int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf, u64 orig_start)
-{
- int level;
- int ret;
-
- BUG_ON(btrfs_header_generation(buf) != trans->transid);
- BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
-
- level = btrfs_header_level(buf);
- if (level == 0) {
- struct btrfs_leaf_ref *ref;
- struct btrfs_leaf_ref *orig_ref;
-
- orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
- if (!orig_ref)
- return -ENOENT;
-
- ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
- if (!ref) {
- btrfs_free_leaf_ref(root, orig_ref);
- return -ENOMEM;
- }
-
- ref->nritems = orig_ref->nritems;
- memcpy(ref->extents, orig_ref->extents,
- sizeof(ref->extents[0]) * ref->nritems);
-
- btrfs_free_leaf_ref(root, orig_ref);
-
- ref->root_gen = trans->transid;
- ref->bytenr = buf->start;
- ref->owner = btrfs_header_owner(buf);
- ref->generation = btrfs_header_generation(buf);
-
- ret = btrfs_add_leaf_ref(root, ref, 0);
- WARN_ON(ret);
- btrfs_free_leaf_ref(root, ref);
- }
- return 0;
-}
-
-static noinline int invalidate_extent_cache(struct btrfs_root *root,
- struct extent_buffer *leaf,
- struct btrfs_block_group_cache *group,
- struct btrfs_root *target_root)
-{
- struct btrfs_key key;
- struct inode *inode = NULL;
- struct btrfs_file_extent_item *fi;
- struct extent_state *cached_state = NULL;
- u64 num_bytes;
- u64 skip_objectid = 0;
- u32 nritems;
- u32 i;
-
- nritems = btrfs_header_nritems(leaf);
- for (i = 0; i < nritems; i++) {
- btrfs_item_key_to_cpu(leaf, &key, i);
- if (key.objectid == skip_objectid ||
- key.type != BTRFS_EXTENT_DATA_KEY)
- continue;
- fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- continue;
- if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
- continue;
- if (!inode || inode->i_ino != key.objectid) {
- iput(inode);
- inode = btrfs_ilookup(target_root->fs_info->sb,
- key.objectid, target_root, 1);
- }
- if (!inode) {
- skip_objectid = key.objectid;
- continue;
- }
- num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
-
- lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
- key.offset + num_bytes - 1, 0, &cached_state,
- GFP_NOFS);
- btrfs_drop_extent_cache(inode, key.offset,
- key.offset + num_bytes - 1, 1);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
- key.offset + num_bytes - 1, &cached_state,
- GFP_NOFS);
- cond_resched();
- }
- iput(inode);
- return 0;
-}
-
-static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *leaf,
- struct btrfs_block_group_cache *group,
- struct inode *reloc_inode)
-{
- struct btrfs_key key;
- struct btrfs_key extent_key;
- struct btrfs_file_extent_item *fi;
- struct btrfs_leaf_ref *ref;
- struct disk_extent *new_extent;
- u64 bytenr;
- u64 num_bytes;
- u32 nritems;
- u32 i;
- int ext_index;
- int nr_extent;
- int ret;
-
- new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
- if (!new_extent)
- return -ENOMEM;
-
- ref = btrfs_lookup_leaf_ref(root, leaf->start);
- BUG_ON(!ref);
-
- ext_index = -1;
- nritems = btrfs_header_nritems(leaf);
- for (i = 0; i < nritems; i++) {
- btrfs_item_key_to_cpu(leaf, &key, i);
- if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
- continue;
- fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- continue;
- bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
- if (bytenr == 0)
- continue;
-
- ext_index++;
- if (bytenr >= group->key.objectid + group->key.offset ||
- bytenr + num_bytes <= group->key.objectid)
- continue;
-
- extent_key.objectid = bytenr;
- extent_key.offset = num_bytes;
- extent_key.type = BTRFS_EXTENT_ITEM_KEY;
- nr_extent = 1;
- ret = get_new_locations(reloc_inode, &extent_key,
- group->key.objectid, 1,
- &new_extent, &nr_extent);
- if (ret > 0)
- continue;
- BUG_ON(ret < 0);
-
- BUG_ON(ref->extents[ext_index].bytenr != bytenr);
- BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
- ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
- ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
-
- btrfs_set_file_extent_disk_bytenr(leaf, fi,
- new_extent->disk_bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi,
- new_extent->disk_num_bytes);
- btrfs_mark_buffer_dirty(leaf);
-
- ret = btrfs_inc_extent_ref(trans, root,
- new_extent->disk_bytenr,
- new_extent->disk_num_bytes,
- leaf->start,
- root->root_key.objectid,
- trans->transid, key.objectid);
- BUG_ON(ret);
-
- ret = btrfs_free_extent(trans, root,
- bytenr, num_bytes, leaf->start,
- btrfs_header_owner(leaf),
- btrfs_header_generation(leaf),
- key.objectid, 0);
- BUG_ON(ret);
- cond_resched();
- }
- kfree(new_extent);
- BUG_ON(ext_index + 1 != ref->nritems);
- btrfs_free_leaf_ref(root, ref);
- return 0;
-}
-
-int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
-{
- struct btrfs_root *reloc_root;
- int ret;
-
- if (root->reloc_root) {
- reloc_root = root->reloc_root;
- root->reloc_root = NULL;
- list_add(&reloc_root->dead_list,
- &root->fs_info->dead_reloc_roots);
-
- btrfs_set_root_bytenr(&reloc_root->root_item,
- reloc_root->node->start);
- btrfs_set_root_level(&root->root_item,
- btrfs_header_level(reloc_root->node));
- memset(&reloc_root->root_item.drop_progress, 0,
- sizeof(struct btrfs_disk_key));
- reloc_root->root_item.drop_level = 0;
-
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
- &reloc_root->root_key,
- &reloc_root->root_item);
- BUG_ON(ret);
- }
- return 0;
-}
-
-int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
-{
- struct btrfs_trans_handle *trans;
- struct btrfs_root *reloc_root;
- struct btrfs_root *prev_root = NULL;
- struct list_head dead_roots;
- int ret;
- unsigned long nr;
-
- INIT_LIST_HEAD(&dead_roots);
- list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
-
- while (!list_empty(&dead_roots)) {
- reloc_root = list_entry(dead_roots.prev,
- struct btrfs_root, dead_list);
- list_del_init(&reloc_root->dead_list);
-
- BUG_ON(reloc_root->commit_root != NULL);
- while (1) {
- trans = btrfs_join_transaction(root, 1);
- BUG_ON(IS_ERR(trans));
-
- mutex_lock(&root->fs_info->drop_mutex);
- ret = btrfs_drop_snapshot(trans, reloc_root);
- if (ret != -EAGAIN)
- break;
- mutex_unlock(&root->fs_info->drop_mutex);
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, root);
- BUG_ON(ret);
- btrfs_btree_balance_dirty(root, nr);
- }
-
- free_extent_buffer(reloc_root->node);
-
- ret = btrfs_del_root(trans, root->fs_info->tree_root,
- &reloc_root->root_key);
- BUG_ON(ret);
- mutex_unlock(&root->fs_info->drop_mutex);
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, root);
- BUG_ON(ret);
- btrfs_btree_balance_dirty(root, nr);
-
- kfree(prev_root);
- prev_root = reloc_root;
- }
- if (prev_root) {
- btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
- kfree(prev_root);
- }
- return 0;
-}
-
-int btrfs_add_dead_reloc_root(struct btrfs_root *root)
-{
- list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
- return 0;
-}
-
-int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
-{
- struct btrfs_root *reloc_root;
- struct btrfs_trans_handle *trans;
- struct btrfs_key location;
- int found;
- int ret;
-
- mutex_lock(&root->fs_info->tree_reloc_mutex);
- ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
- BUG_ON(ret);
- found = !list_empty(&root->fs_info->dead_reloc_roots);
- mutex_unlock(&root->fs_info->tree_reloc_mutex);
-
- if (found) {
- trans = btrfs_start_transaction(root, 1);
- BUG_ON(IS_ERR(trans));
- ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
- }
-
- location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
- location.offset = (u64)-1;
- location.type = BTRFS_ROOT_ITEM_KEY;
-
- reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
- BUG_ON(!reloc_root);
- ret = btrfs_orphan_cleanup(reloc_root);
- BUG_ON(ret);
- return 0;
-}
-
-static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
-{
- struct btrfs_root *reloc_root;
- struct extent_buffer *eb;
- struct btrfs_root_item *root_item;
- struct btrfs_key root_key;
- int ret;
-
- BUG_ON(!root->ref_cows);
- if (root->reloc_root)
- return 0;
-
- root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
- if (!root_item)
- return -ENOMEM;
-
- ret = btrfs_copy_root(trans, root, root->commit_root,
- &eb, BTRFS_TREE_RELOC_OBJECTID);
- BUG_ON(ret);
-
- root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
- root_key.offset = root->root_key.objectid;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
-
- memcpy(root_item, &root->root_item, sizeof(root_item));
- btrfs_set_root_refs(root_item, 0);
- btrfs_set_root_bytenr(root_item, eb->start);
- btrfs_set_root_level(root_item, btrfs_header_level(eb));
- btrfs_set_root_generation(root_item, trans->transid);
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
-
- ret = btrfs_insert_root(trans, root->fs_info->tree_root,
- &root_key, root_item);
- BUG_ON(ret);
- kfree(root_item);
-
- reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
- &root_key);
- BUG_ON(IS_ERR(reloc_root));
- reloc_root->last_trans = trans->transid;
- reloc_root->commit_root = NULL;
- reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
-
- root->reloc_root = reloc_root;
- return 0;
-}
-
-/*
- * Core function of space balance.
- *
- * The idea is to use reloc trees to relocate tree blocks in reference
- * counted roots. There is one reloc tree for each subvol, and all
- * reloc trees share the same root key objectid. Reloc trees are
- * snapshots of the latest committed roots of subvols (root->commit_root).
- *
- * To relocate a tree block referenced by a subvol, there are two steps:
- * COW the block through the subvol's reloc tree, then update the block
- * pointer in the subvol to point to the new block. Since all reloc
- * trees share the same root key objectid, special handling for tree
- * blocks owned by them is easy. Once a tree block has been COWed in
- * one reloc tree, we can use the resulting new block directly when the
- * same block needs to be COWed again through other reloc trees. This
- * way, relocated tree blocks are shared between reloc trees, and
- * therefore also shared between subvols.
- */
-static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *first_key,
- struct btrfs_ref_path *ref_path,
- struct btrfs_block_group_cache *group,
- struct inode *reloc_inode)
-{
- struct btrfs_root *reloc_root;
- struct extent_buffer *eb = NULL;
- struct btrfs_key *keys;
- u64 *nodes;
- int level;
- int shared_level;
- int lowest_level = 0;
- int ret;
-
- if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
- lowest_level = ref_path->owner_objectid;
-
- if (!root->ref_cows) {
- path->lowest_level = lowest_level;
- ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
- BUG_ON(ret < 0);
- path->lowest_level = 0;
- btrfs_release_path(root, path);
- return 0;
- }
-
- mutex_lock(&root->fs_info->tree_reloc_mutex);
- ret = init_reloc_tree(trans, root);
- BUG_ON(ret);
- reloc_root = root->reloc_root;
-
- shared_level = ref_path->shared_level;
- ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
-
- keys = ref_path->node_keys;
- nodes = ref_path->new_nodes;
- memset(&keys[shared_level + 1], 0,
- sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
- memset(&nodes[shared_level + 1], 0,
- sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
-
- if (nodes[lowest_level] == 0) {
- path->lowest_level = lowest_level;
- ret = btrfs_search_slot(trans, reloc_root, first_key, path,
- 0, 1);
- BUG_ON(ret);
- for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
- eb = path->nodes[level];
- if (!eb || eb == reloc_root->node)
- break;
- nodes[level] = eb->start;
- if (level == 0)
- btrfs_item_key_to_cpu(eb, &keys[level], 0);
- else
- btrfs_node_key_to_cpu(eb, &keys[level], 0);
- }
- if (nodes[0] &&
- ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- eb = path->nodes[0];
- ret = replace_extents_in_leaf(trans, reloc_root, eb,
- group, reloc_inode);
- BUG_ON(ret);
- }
- btrfs_release_path(reloc_root, path);
- } else {
- ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
- lowest_level);
- BUG_ON(ret);
- }
-
- /*
- * replace tree blocks in the fs tree with tree blocks in
- * the reloc tree.
- */
- ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
- BUG_ON(ret < 0);
-
- if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_search_slot(trans, reloc_root, first_key, path,
- 0, 0);
- BUG_ON(ret);
- extent_buffer_get(path->nodes[0]);
- eb = path->nodes[0];
- btrfs_release_path(reloc_root, path);
- ret = invalidate_extent_cache(reloc_root, eb, group, root);
- BUG_ON(ret);
- free_extent_buffer(eb);
- }
-
- mutex_unlock(&root->fs_info->tree_reloc_mutex);
- path->lowest_level = 0;
- return 0;
-}
-
-static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *first_key,
- struct btrfs_ref_path *ref_path)
-{
- int ret;
-
- ret = relocate_one_path(trans, root, path, first_key,
- ref_path, NULL, NULL);
- BUG_ON(ret);
-
- return 0;
-}
-
-static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_path *path,
- struct btrfs_key *extent_key)
-{
- int ret;
-
- ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
- if (ret)
- goto out;
- ret = btrfs_del_item(trans, extent_root, path);
-out:
- btrfs_release_path(extent_root, path);
- return ret;
-}
-
-static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
- struct btrfs_ref_path *ref_path)
-{
- struct btrfs_key root_key;
-
- root_key.objectid = ref_path->root_objectid;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
- if (is_cowonly_root(ref_path->root_objectid))
- root_key.offset = 0;
- else
- root_key.offset = (u64)-1;
-
- return btrfs_read_fs_root_no_name(fs_info, &root_key);
-}
-
-static noinline int relocate_one_extent(struct btrfs_root *extent_root,
- struct btrfs_path *path,
- struct btrfs_key *extent_key,
- struct btrfs_block_group_cache *group,
- struct inode *reloc_inode, int pass)
-{
- struct btrfs_trans_handle *trans;
- struct btrfs_root *found_root;
- struct btrfs_ref_path *ref_path = NULL;
- struct disk_extent *new_extents = NULL;
- int nr_extents = 0;
- int loops;
- int ret;
- int level;
- struct btrfs_key first_key;
- u64 prev_block = 0;
-
-
- trans = btrfs_start_transaction(extent_root, 1);
- BUG_ON(IS_ERR(trans));
-
- if (extent_key->objectid == 0) {
- ret = del_extent_zero(trans, extent_root, path, extent_key);
- goto out;
- }
-
- ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
- if (!ref_path) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (loops = 0; ; loops++) {
- if (loops == 0) {
- ret = btrfs_first_ref_path(trans, extent_root, ref_path,
- extent_key->objectid);
- } else {
- ret = btrfs_next_ref_path(trans, extent_root, ref_path);
- }
- if (ret < 0)
- goto out;
- if (ret > 0)
- break;
-
- if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
- ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
- continue;
-
- found_root = read_ref_root(extent_root->fs_info, ref_path);
- BUG_ON(!found_root);
-		/*
-		 * for reference-counted trees, only process reference paths
-		 * rooted at the latest committed root.
-		 */
- if (found_root->ref_cows &&
- ref_path->root_generation != found_root->root_key.offset)
- continue;
-
- if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- if (pass == 0) {
- /*
- * copy data extents to new locations
- */
- u64 group_start = group->key.objectid;
- ret = relocate_data_extent(reloc_inode,
- extent_key,
- group_start);
- if (ret < 0)
- goto out;
- break;
- }
- level = 0;
- } else {
- level = ref_path->owner_objectid;
- }
-
- if (prev_block != ref_path->nodes[level]) {
- struct extent_buffer *eb;
- u64 block_start = ref_path->nodes[level];
- u64 block_size = btrfs_level_size(found_root, level);
-
- eb = read_tree_block(found_root, block_start,
- block_size, 0);
- if (!eb) {
- ret = -EIO;
- goto out;
- }
- btrfs_tree_lock(eb);
- BUG_ON(level != btrfs_header_level(eb));
-
- if (level == 0)
- btrfs_item_key_to_cpu(eb, &first_key, 0);
- else
- btrfs_node_key_to_cpu(eb, &first_key, 0);
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
- prev_block = block_start;
- }
-
- mutex_lock(&extent_root->fs_info->trans_mutex);
- btrfs_record_root_in_trans(found_root);
- mutex_unlock(&extent_root->fs_info->trans_mutex);
- if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- /*
- * try to update data extent references while
- * keeping metadata shared between snapshots.
- */
- if (pass == 1) {
- ret = relocate_one_path(trans, found_root,
- path, &first_key, ref_path,
- group, reloc_inode);
- if (ret < 0)
- goto out;
- continue;
- }
-			/*
-			 * use the fallback method to process the remaining
-			 * references.
-			 */
- if (!new_extents) {
- u64 group_start = group->key.objectid;
- new_extents = kmalloc(sizeof(*new_extents),
- GFP_NOFS);
- if (!new_extents) {
- ret = -ENOMEM;
- goto out;
- }
- nr_extents = 1;
- ret = get_new_locations(reloc_inode,
- extent_key,
- group_start, 1,
- &new_extents,
- &nr_extents);
- if (ret)
- goto out;
- }
- ret = replace_one_extent(trans, found_root,
- path, extent_key,
- &first_key, ref_path,
- new_extents, nr_extents);
- } else {
- ret = relocate_tree_block(trans, found_root, path,
- &first_key, ref_path);
- }
- if (ret < 0)
- goto out;
- }
- ret = 0;
-out:
- btrfs_end_transaction(trans, extent_root);
- kfree(new_extents);
- kfree(ref_path);
- return ret;
-}
-#endif
-
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
u64 num_devices;
@@ -8555,10 +6905,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
ret = -ENOMEM;
goto error;
}
+ cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+ GFP_NOFS);
+ if (!cache->free_space_ctl) {
+ kfree(cache);
+ ret = -ENOMEM;
+ goto error;
+ }
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
- spin_lock_init(&cache->tree_lock);
cache->fs_info = info;
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
@@ -8566,24 +6922,18 @@ int btrfs_read_block_groups(struct btrfs_root *root)
if (need_clear)
cache->disk_cache_state = BTRFS_DC_CLEAR;
- /*
- * we only want to have 32k of ram per block group for keeping
- * track of free space, and if we pass 1/2 of that we want to
- * start converting things over to using bitmaps
- */
- cache->extents_thresh = ((1024 * 32) / 2) /
- sizeof(struct btrfs_free_space);
-
read_extent_buffer(leaf, &cache->item,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(cache->item));
memcpy(&cache->key, &found_key, sizeof(found_key));
key.objectid = found_key.objectid + found_key.offset;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
cache->flags = btrfs_block_group_flags(&cache->item);
cache->sectorsize = root->sectorsize;
+ btrfs_init_free_space_ctl(cache);
+
/*
* We need to exclude the super stripes now so that the space
* info has super bytes accounted for, otherwise we'll think
@@ -8670,6 +7020,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache = kzalloc(sizeof(*cache), GFP_NOFS);
if (!cache)
return -ENOMEM;
+ cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+ GFP_NOFS);
+ if (!cache->free_space_ctl) {
+ kfree(cache);
+ return -ENOMEM;
+ }
cache->key.objectid = chunk_offset;
cache->key.offset = size;
@@ -8677,19 +7033,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->sectorsize = root->sectorsize;
cache->fs_info = root->fs_info;
- /*
- * we only want to have 32k of ram per block group for keeping track
- * of free space, and if we pass 1/2 of that we want to start
- * converting things over to using bitmaps
- */
- cache->extents_thresh = ((1024 * 32) / 2) /
- sizeof(struct btrfs_free_space);
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
- spin_lock_init(&cache->tree_lock);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
+ btrfs_init_free_space_ctl(cache);
+
btrfs_set_block_group_used(&cache->item, bytes_used);
btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
cache->flags = type;
@@ -8802,12 +7152,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (ret < 0)
goto out;
if (ret > 0)
- btrfs_release_path(tree_root, path);
+ btrfs_release_path(path);
if (ret == 0) {
ret = btrfs_del_item(trans, tree_root, path);
if (ret)
goto out;
- btrfs_release_path(tree_root, path);
+ btrfs_release_path(path);
}
spin_lock(&root->fs_info->block_group_cache_lock);
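For orientation, a minimal sketch (not part of the patch) of the block-group setup pattern the two extent-tree.c hunks above introduce: free-space tracking moves into a separately allocated free_space_ctl, which must be freed along with the cache on the error path. Only the names visible in the hunks (free_space_ctl, btrfs_init_free_space_ctl) are taken from the patch; the wrapper function itself is hypothetical.

static struct btrfs_block_group_cache *alloc_block_group(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	/* free-space bookkeeping now lives in its own allocation */
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	cache->fs_info = info;
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	btrfs_init_free_space_ctl(cache);

	return cache;
}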
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4f9893243dae..c5d9fbb92bc3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -103,7 +103,7 @@ void extent_io_exit(void)
}
void extent_io_tree_init(struct extent_io_tree *tree,
- struct address_space *mapping, gfp_t mask)
+ struct address_space *mapping)
{
tree->state = RB_ROOT;
INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
@@ -441,6 +441,15 @@ static int clear_state_bit(struct extent_io_tree *tree,
return ret;
}
+static struct extent_state *
+alloc_extent_state_atomic(struct extent_state *prealloc)
+{
+ if (!prealloc)
+ prealloc = alloc_extent_state(GFP_ATOMIC);
+
+ return prealloc;
+}
+
/*
* clear some bits on a range in the tree. This may require splitting
* or inserting elements in the tree, so the gfp mask is used to
@@ -531,8 +540,8 @@ hit_next:
*/
if (state->start < start) {
- if (!prealloc)
- prealloc = alloc_extent_state(GFP_ATOMIC);
+ prealloc = alloc_extent_state_atomic(prealloc);
+ BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, start);
BUG_ON(err == -EEXIST);
prealloc = NULL;
@@ -553,8 +562,8 @@ hit_next:
* on the first half
*/
if (state->start <= end && state->end > end) {
- if (!prealloc)
- prealloc = alloc_extent_state(GFP_ATOMIC);
+ prealloc = alloc_extent_state_atomic(prealloc);
+ BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
if (wake)
@@ -727,8 +736,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
again:
if (!prealloc && (mask & __GFP_WAIT)) {
prealloc = alloc_extent_state(mask);
- if (!prealloc)
- return -ENOMEM;
+ BUG_ON(!prealloc);
}
spin_lock(&tree->lock);
@@ -745,6 +753,8 @@ again:
*/
node = tree_search(tree, start);
if (!node) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+ BUG_ON(!prealloc);
err = insert_state(tree, prealloc, start, end, &bits);
prealloc = NULL;
BUG_ON(err == -EEXIST);
@@ -773,20 +783,18 @@ hit_next:
if (err)
goto out;
+ next_node = rb_next(node);
cache_state(state, cached_state);
merge_state(tree, state);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
- if (start < end && prealloc && !need_resched()) {
- next_node = rb_next(node);
- if (next_node) {
- state = rb_entry(next_node, struct extent_state,
- rb_node);
- if (state->start == start)
- goto hit_next;
- }
+ if (next_node && start < end && prealloc && !need_resched()) {
+ state = rb_entry(next_node, struct extent_state,
+ rb_node);
+ if (state->start == start)
+ goto hit_next;
}
goto search_again;
}
@@ -813,6 +821,9 @@ hit_next:
err = -EEXIST;
goto out;
}
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+ BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, start);
BUG_ON(err == -EEXIST);
prealloc = NULL;
@@ -843,14 +854,25 @@ hit_next:
this_end = end;
else
this_end = last_start - 1;
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+ BUG_ON(!prealloc);
+
+			/*
+			 * Avoid freeing 'prealloc' if it can be merged with
+			 * the later extent.
+			 */
+ atomic_inc(&prealloc->refs);
err = insert_state(tree, prealloc, start, this_end,
&bits);
BUG_ON(err == -EEXIST);
if (err) {
+ free_extent_state(prealloc);
prealloc = NULL;
goto out;
}
cache_state(prealloc, cached_state);
+ free_extent_state(prealloc);
prealloc = NULL;
start = this_end + 1;
goto search_again;
@@ -867,6 +889,9 @@ hit_next:
err = -EEXIST;
goto out;
}
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+ BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
@@ -943,13 +968,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
NULL, mask);
}
-static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
-{
- return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
- NULL, mask);
-}
-
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask)
{
@@ -965,11 +983,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
cached_state, mask);
}
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
- return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
-}
-
/*
* either insert or lock state struct between start and end use mask to tell
* us if waiting is desired.
@@ -1030,25 +1043,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
}
/*
- * helper function to set pages and extents in the tree dirty
- */
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
-{
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
- struct page *page;
-
- while (index <= end_index) {
- page = find_get_page(tree->mapping, index);
- BUG_ON(!page);
- __set_page_dirty_nobuffers(page);
- page_cache_release(page);
- index++;
- }
- return 0;
-}
-
-/*
* helper function to set both pages and extents in the tree writeback
*/
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1821,46 +1815,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
bio_put(bio);
}
-/*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
-static void end_bio_extent_preparewrite(struct bio *bio, int err)
-{
- const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct extent_io_tree *tree;
- u64 start;
- u64 end;
-
- do {
- struct page *page = bvec->bv_page;
- struct extent_state *cached = NULL;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
-
- start = ((u64)page->index << PAGE_CACHE_SHIFT) +
- bvec->bv_offset;
- end = start + bvec->bv_len - 1;
-
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
-
- if (uptodate) {
- set_extent_uptodate(tree, start, end, &cached,
- GFP_ATOMIC);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
-
- unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
- } while (bvec >= bio->bi_io_vec);
-
- bio_put(bio);
-}
-
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
gfp_t gfp_flags)
@@ -2009,7 +1963,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
struct btrfs_ordered_extent *ordered;
int ret;
int nr = 0;
- size_t page_offset = 0;
+ size_t pg_offset = 0;
size_t iosize;
size_t disk_io_size;
size_t blocksize = inode->i_sb->s_blocksize;
@@ -2052,9 +2006,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
char *userpage;
struct extent_state *cached = NULL;
- iosize = PAGE_CACHE_SIZE - page_offset;
+ iosize = PAGE_CACHE_SIZE - pg_offset;
userpage = kmap_atomic(page, KM_USER0);
- memset(userpage + page_offset, 0, iosize);
+ memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
@@ -2063,9 +2017,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
&cached, GFP_NOFS);
break;
}
- em = get_extent(inode, page, page_offset, cur,
+ em = get_extent(inode, page, pg_offset, cur,
end - cur + 1, 0);
- if (IS_ERR(em) || !em) {
+ if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
unlock_extent(tree, cur, end, GFP_NOFS);
break;
@@ -2103,7 +2057,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
struct extent_state *cached = NULL;
userpage = kmap_atomic(page, KM_USER0);
- memset(userpage + page_offset, 0, iosize);
+ memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
@@ -2112,7 +2066,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
unlock_extent_cached(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
continue;
}
/* the get_extent function already copied into the page */
@@ -2121,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
check_page_uptodate(tree, page);
unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
continue;
}
/* we have an inline extent but it didn't get marked up
@@ -2131,7 +2085,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
continue;
}
@@ -2144,7 +2098,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
pnr -= page->index;
ret = submit_extent_page(READ, tree, page,
- sector, disk_io_size, page_offset,
+ sector, disk_io_size, pg_offset,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
*bio_flags,
@@ -2155,7 +2109,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
if (ret)
SetPageError(page);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
}
out:
if (!nr) {
@@ -2351,7 +2305,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
}
em = epd->get_extent(inode, page, pg_offset, cur,
end - cur + 1, 1);
- if (IS_ERR(em) || !em) {
+ if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
break;
}
@@ -2730,128 +2684,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
}
/*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
-int extent_commit_write(struct extent_io_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to)
-{
- loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
- set_page_extent_mapped(page);
- set_page_dirty(page);
-
- if (pos > inode->i_size) {
- i_size_write(inode, pos);
- mark_inode_dirty(inode);
- }
- return 0;
-}
-
-int extent_prepare_write(struct extent_io_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to, get_extent_t *get_extent)
-{
- u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
- u64 block_start;
- u64 orig_block_start;
- u64 block_end;
- u64 cur_end;
- struct extent_map *em;
- unsigned blocksize = 1 << inode->i_blkbits;
- size_t page_offset = 0;
- size_t block_off_start;
- size_t block_off_end;
- int err = 0;
- int iocount = 0;
- int ret = 0;
- int isnew;
-
- set_page_extent_mapped(page);
-
- block_start = (page_start + from) & ~((u64)blocksize - 1);
- block_end = (page_start + to - 1) | (blocksize - 1);
- orig_block_start = block_start;
-
- lock_extent(tree, page_start, page_end, GFP_NOFS);
- while (block_start <= block_end) {
- em = get_extent(inode, page, page_offset, block_start,
- block_end - block_start + 1, 1);
- if (IS_ERR(em) || !em)
- goto err;
-
- cur_end = min(block_end, extent_map_end(em) - 1);
- block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
- block_off_end = block_off_start + blocksize;
- isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
- if (!PageUptodate(page) && isnew &&
- (block_off_end > to || block_off_start < from)) {
- void *kaddr;
-
- kaddr = kmap_atomic(page, KM_USER0);
- if (block_off_end > to)
- memset(kaddr + to, 0, block_off_end - to);
- if (block_off_start < from)
- memset(kaddr + block_off_start, 0,
- from - block_off_start);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- }
- if ((em->block_start != EXTENT_MAP_HOLE &&
- em->block_start != EXTENT_MAP_INLINE) &&
- !isnew && !PageUptodate(page) &&
- (block_off_end > to || block_off_start < from) &&
- !test_range_bit(tree, block_start, cur_end,
- EXTENT_UPTODATE, 1, NULL)) {
- u64 sector;
- u64 extent_offset = block_start - em->start;
- size_t iosize;
- sector = (em->block_start + extent_offset) >> 9;
- iosize = (cur_end - block_start + blocksize) &
- ~((u64)blocksize - 1);
- /*
- * we've already got the extent locked, but we
- * need to split the state such that our end_bio
- * handler can clear the lock.
- */
- set_extent_bit(tree, block_start,
- block_start + iosize - 1,
- EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
- ret = submit_extent_page(READ, tree, page,
- sector, iosize, page_offset, em->bdev,
- NULL, 1,
- end_bio_extent_preparewrite, 0,
- 0, 0);
- if (ret && !err)
- err = ret;
- iocount++;
- block_start = block_start + iosize;
- } else {
- struct extent_state *cached = NULL;
-
- set_extent_uptodate(tree, block_start, cur_end, &cached,
- GFP_NOFS);
- unlock_extent_cached(tree, block_start, cur_end,
- &cached, GFP_NOFS);
- block_start = cur_end + 1;
- }
- page_offset = block_start & (PAGE_CACHE_SIZE - 1);
- free_extent_map(em);
- }
- if (iocount) {
- wait_extent_bit(tree, orig_block_start,
- block_end, EXTENT_LOCKED);
- }
- check_page_uptodate(tree, page);
-err:
- /* FIXME, zero out newly allocated blocks on error */
- return err;
-}
-
-/*
* a helper for releasepage, this tests for areas of the page that
* are locked or under IO and drops the related state bits if it is safe
* to drop the page.
@@ -2909,7 +2741,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
len = end - start + 1;
write_lock(&map->lock);
em = lookup_extent_mapping(map, start, len);
- if (!em || IS_ERR(em)) {
+ if (IS_ERR_OR_NULL(em)) {
write_unlock(&map->lock);
break;
}
@@ -2937,33 +2769,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
return try_release_extent_state(map, tree, page, mask);
}
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
- get_extent_t *get_extent)
-{
- struct inode *inode = mapping->host;
- struct extent_state *cached_state = NULL;
- u64 start = iblock << inode->i_blkbits;
- sector_t sector = 0;
- size_t blksize = (1 << inode->i_blkbits);
- struct extent_map *em;
-
- lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
- 0, &cached_state, GFP_NOFS);
- em = get_extent(inode, NULL, 0, start, blksize, 0);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
- start + blksize - 1, &cached_state, GFP_NOFS);
- if (!em || IS_ERR(em))
- return 0;
-
- if (em->block_start > EXTENT_MAP_LAST_BYTE)
- goto out;
-
- sector = (em->block_start + start - em->start) >> inode->i_blkbits;
-out:
- free_extent_map(em);
- return sector;
-}
-
/*
* helper function for fiemap, which doesn't want to see any holes.
* This maps until we find something past 'last'
@@ -2986,7 +2791,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
break;
len = (len + sectorsize - 1) & ~(sectorsize - 1);
em = get_extent(inode, NULL, 0, offset, len, 0);
- if (!em || IS_ERR(em))
+ if (IS_ERR_OR_NULL(em))
return em;
/* if this isn't a hole return it */
@@ -3040,7 +2845,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
* because there might be preallocation past i_size
*/
ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
- path, inode->i_ino, -1, 0);
+ path, btrfs_ino(inode), -1, 0);
if (ret < 0) {
btrfs_free_path(path);
return ret;
@@ -3053,7 +2858,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
found_type = btrfs_key_type(&found_key);
/* No extents, but there might be delalloc bits */
- if (found_key.objectid != inode->i_ino ||
+ if (found_key.objectid != btrfs_ino(inode) ||
found_type != BTRFS_EXTENT_DATA_KEY) {
/* have to trust i_size as the end */
last = (u64)-1;
@@ -3276,8 +3081,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
- struct page *page0,
- gfp_t mask)
+ struct page *page0)
{
unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
@@ -3298,7 +3102,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
}
rcu_read_unlock();
- eb = __alloc_extent_buffer(tree, start, len, mask);
+ eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
if (!eb)
return NULL;
@@ -3315,7 +3119,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
i = 0;
}
for (; i < num_pages; i++, index++) {
- p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+ p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
if (!p) {
WARN_ON(1);
goto free_eb;
@@ -3387,8 +3191,7 @@ free_eb:
}
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
- u64 start, unsigned long len,
- gfp_t mask)
+ u64 start, unsigned long len)
{
struct extent_buffer *eb;
@@ -3449,13 +3252,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
return 0;
}
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
- struct extent_buffer *eb)
-{
- return wait_on_extent_writeback(tree, eb->start,
- eb->start + eb->len - 1);
-}
-
int set_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb)
{
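As a reading aid (not part of the patch), the ordering that the extra reference on 'prealloc' in set_extent_bit() above protects: insert_state() may merge the new state into a neighbour and drop it, so the caller pins it until cache_state() has run. Names come from the hunks; this standalone fragment is illustrative only.

	prealloc = alloc_extent_state_atomic(prealloc);
	BUG_ON(!prealloc);

	/* pin prealloc: it may be merged away inside insert_state() */
	atomic_inc(&prealloc->refs);
	err = insert_state(tree, prealloc, start, this_end, &bits);
	if (err) {
		free_extent_state(prealloc);	/* drop our extra ref on failure */
		prealloc = NULL;
	} else {
		cache_state(prealloc, cached_state);
		free_extent_state(prealloc);	/* the tree keeps its own ref */
		prealloc = NULL;
	}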
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index af2d7179c372..4e8445a4757c 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -153,23 +153,14 @@ static inline int extent_compress_type(unsigned long bio_flags)
struct extent_map_tree;
-static inline struct extent_state *extent_state_next(struct extent_state *state)
-{
- struct rb_node *node;
- node = rb_next(&state->rb_node);
- if (!node)
- return NULL;
- return rb_entry(node, struct extent_state, rb_node);
-}
-
typedef struct extent_map *(get_extent_t)(struct inode *inode,
struct page *page,
- size_t page_offset,
+ size_t pg_offset,
u64 start, u64 len,
int create);
void extent_io_tree_init(struct extent_io_tree *tree,
- struct address_space *mapping, gfp_t mask);
+ struct address_space *mapping);
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
@@ -215,14 +206,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
-int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask);
-int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
- u64 end, gfp_t mask);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
-int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask);
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, int bits);
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
@@ -243,28 +228,17 @@ int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages,
get_extent_t get_extent);
-int extent_prepare_write(struct extent_io_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to, get_extent_t *get_extent);
-int extent_commit_write(struct extent_io_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to);
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
- get_extent_t *get_extent);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent);
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
- struct page *page0,
- gfp_t mask);
+ struct page *page0);
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
- u64 start, unsigned long len,
- gfp_t mask);
+ u64 start, unsigned long len);
void free_extent_buffer(struct extent_buffer *eb);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, u64 start, int wait,
@@ -292,16 +266,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
- struct extent_buffer *eb);
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
int clear_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
-int test_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb);
int set_extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb);
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
@@ -319,7 +288,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long *map_start,
unsigned long *map_len, int km);
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
-int release_extent_buffer_tail_pages(struct extent_buffer *eb);
int extent_range_uptodate(struct extent_io_tree *tree,
u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index a24a3f2fa13e..2d0410344ea3 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -28,12 +28,11 @@ void extent_map_exit(void)
/**
* extent_map_tree_init - initialize extent map tree
* @tree: tree to initialize
- * @mask: flags for memory allocations during tree operations
*
* Initialize the extent tree @tree. Should be called for each new inode
* or other user of the extent_map interface.
*/
-void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
+void extent_map_tree_init(struct extent_map_tree *tree)
{
tree->map = RB_ROOT;
rwlock_init(&tree->lock);
@@ -41,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
/**
* alloc_extent_map - allocate new extent map structure
- * @mask: memory allocation flags
*
* Allocate a new extent_map structure. The new structure is
* returned with a reference count of one and needs to be
* freed using free_extent_map()
*/
-struct extent_map *alloc_extent_map(gfp_t mask)
+struct extent_map *alloc_extent_map(void)
{
struct extent_map *em;
- em = kmem_cache_alloc(extent_map_cache, mask);
+ em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
if (!em)
return NULL;
em->in_tree = 0;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 28b44dbd1e35..33a7890b1f40 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -49,14 +49,14 @@ static inline u64 extent_map_block_end(struct extent_map *em)
return em->block_start + em->block_len;
}
-void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
+void extent_map_tree_init(struct extent_map_tree *tree);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em);
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
-struct extent_map *alloc_extent_map(gfp_t mask);
+struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
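For reference, a hypothetical caller after the extent_map API change above: extent_map_tree_init() no longer takes a mask at all, and alloc_extent_map() always allocates with GFP_NOFS internally.

	struct extent_map_tree tree;
	struct extent_map *em;

	extent_map_tree_init(&tree);	/* was: extent_map_tree_init(&tree, GFP_NOFS) */

	em = alloc_extent_map();	/* was: alloc_extent_map(GFP_NOFS) */
	if (!em)
		return -ENOMEM;
	/* fill in and add_extent_mapping() as before, then drop the ref */
	free_extent_map(em);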
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a6a9d4e8b491..90d4ee52cd45 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
u32 item_size;
if (item)
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
path, disk_bytenr, 0);
if (IS_ERR(item)) {
@@ -208,12 +208,13 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
EXTENT_NODATASUM, GFP_NOFS);
} else {
printk(KERN_INFO "btrfs no csum found "
- "for inode %lu start %llu\n",
- inode->i_ino,
+ "for inode %llu start %llu\n",
+ (unsigned long long)
+ btrfs_ino(inode),
(unsigned long long)offset);
}
item = NULL;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto found;
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
@@ -266,7 +267,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
- struct list_head *list)
+ struct list_head *list, int search_commit)
{
struct btrfs_key key;
struct btrfs_path *path;
@@ -283,6 +284,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
path = btrfs_alloc_path();
BUG_ON(!path);
+ if (search_commit) {
+ path->skip_locking = 1;
+ path->reada = 2;
+ path->search_commit_root = 1;
+ }
+
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
key.offset = start;
key.type = BTRFS_EXTENT_CSUM_KEY;
@@ -495,7 +502,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
ret = btrfs_truncate_item(trans, root, path, new_size, 1);
- BUG_ON(ret);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@@ -508,7 +514,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
new_size *= csum_size;
ret = btrfs_truncate_item(trans, root, path, new_size, 0);
- BUG_ON(ret);
key->offset = end_byte;
ret = btrfs_set_item_key_safe(trans, root, path, key);
@@ -551,10 +556,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
if (path->slots[0] == 0)
- goto out;
+ break;
path->slots[0]--;
} else if (ret < 0) {
- goto out;
+ break;
}
leaf = path->nodes[0];
@@ -579,7 +584,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
/* delete the entire item, it is inside our range */
if (key.offset >= bytenr && csum_end <= end_byte) {
ret = btrfs_del_item(trans, root, path);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (key.offset == bytenr)
break;
} else if (key.offset < bytenr && csum_end > end_byte) {
@@ -631,11 +637,12 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
if (key.offset < bytenr)
break;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
+ ret = 0;
out:
btrfs_free_path(path);
- return 0;
+ return ret;
}
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
@@ -722,7 +729,7 @@ again:
* at this point, we know the tree has an item, but it isn't big
* enough yet to put our csum in. Grow it
*/
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
if (ret < 0)
@@ -761,12 +768,11 @@ again:
goto insert;
ret = btrfs_extend_item(trans, root, path, diff);
- BUG_ON(ret);
goto csum;
}
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
csum_offset = 0;
if (found_next) {
u64 tmp = total_bytes + root->sectorsize;
@@ -850,7 +856,7 @@ next_sector:
}
btrfs_mark_buffer_dirty(path->nodes[0]);
if (total_bytes < sums->len) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
cond_resched();
goto again;
}
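A hypothetical caller of the extended btrfs_lookup_csums_range() above, for illustration only: passing a non-zero search_commit makes the lookup read the commit root with readahead and without tree locking, as the added block sets up. Passing the csum root as the first argument is assumed from the similar lookup earlier in this file.

	LIST_HEAD(csums);
	int ret;

	/* collect checksums for [start, end] from the committed csum tree */
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, start, end,
				       &csums, 1);
	if (ret)
		return ret;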
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 75899a01dded..c6a22d783c35 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -40,6 +40,263 @@
#include "locking.h"
#include "compat.h"
+/*
+ * when auto defrag is enabled, we queue up these
+ * defrag structs to remember which inodes need
+ * defragging passes
+ */
+struct inode_defrag {
+ struct rb_node rb_node;
+ /* objectid */
+ u64 ino;
+ /*
+ * transid where the defrag was added, we search for
+ * extents newer than this
+ */
+ u64 transid;
+
+ /* root objectid */
+ u64 root;
+
+ /* last offset we were able to defrag */
+ u64 last_offset;
+
+ /* if we've wrapped around back to zero once already */
+ int cycled;
+};
+
+/* insert a record for an inode into the defrag tree.  The lock
+ * must be held already.
+ *
+ * If you're inserting a record for an older transid than an
+ * existing record, the transid already in the tree is lowered.
+ *
+ * If an existing record is found, the defrag item you
+ * pass in is freed.
+ */
+static int __btrfs_add_inode_defrag(struct inode *inode,
+ struct inode_defrag *defrag)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct inode_defrag *entry;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ p = &root->fs_info->defrag_inodes.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct inode_defrag, rb_node);
+
+ if (defrag->ino < entry->ino)
+ p = &parent->rb_left;
+ else if (defrag->ino > entry->ino)
+ p = &parent->rb_right;
+ else {
+ /* if we're reinserting an entry for
+ * an old defrag run, make sure to
+ * lower the transid of our existing record
+ */
+ if (defrag->transid < entry->transid)
+ entry->transid = defrag->transid;
+ if (defrag->last_offset > entry->last_offset)
+ entry->last_offset = defrag->last_offset;
+ goto exists;
+ }
+ }
+ BTRFS_I(inode)->in_defrag = 1;
+ rb_link_node(&defrag->rb_node, parent, p);
+ rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
+ return 0;
+
+exists:
+ kfree(defrag);
+ return 0;
+
+}
+
+/*
+ * insert a defrag record for this inode if auto defrag is
+ * enabled
+ */
+int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
+ struct inode *inode)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct inode_defrag *defrag;
+ int ret = 0;
+ u64 transid;
+
+ if (!btrfs_test_opt(root, AUTO_DEFRAG))
+ return 0;
+
+ if (root->fs_info->closing)
+ return 0;
+
+ if (BTRFS_I(inode)->in_defrag)
+ return 0;
+
+ if (trans)
+ transid = trans->transid;
+ else
+ transid = BTRFS_I(inode)->root->last_trans;
+
+ defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
+ if (!defrag)
+ return -ENOMEM;
+
+ defrag->ino = inode->i_ino;
+ defrag->transid = transid;
+ defrag->root = root->root_key.objectid;
+
+ spin_lock(&root->fs_info->defrag_inodes_lock);
+ if (!BTRFS_I(inode)->in_defrag)
+ ret = __btrfs_add_inode_defrag(inode, defrag);
+ spin_unlock(&root->fs_info->defrag_inodes_lock);
+ return ret;
+}
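/*
 * Editorial note, not part of the patch: btrfs_add_inode_defrag() above
 * tests BTRFS_I(inode)->in_defrag without the lock as a cheap fast path,
 * then re-checks it under defrag_inodes_lock before linking the record,
 * so at most one defrag record per inode sits in the rbtree at a time.
 */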
+
+/*
+ * must be called with the defrag_inodes lock held
+ */
+struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
+ struct rb_node **next)
+{
+ struct inode_defrag *entry = NULL;
+ struct rb_node *p;
+ struct rb_node *parent = NULL;
+
+ p = info->defrag_inodes.rb_node;
+ while (p) {
+ parent = p;
+ entry = rb_entry(parent, struct inode_defrag, rb_node);
+
+ if (ino < entry->ino)
+ p = parent->rb_left;
+ else if (ino > entry->ino)
+ p = parent->rb_right;
+ else
+ return entry;
+ }
+
+ if (next) {
+ while (parent && ino > entry->ino) {
+ parent = rb_next(parent);
+ entry = rb_entry(parent, struct inode_defrag, rb_node);
+ }
+ *next = parent;
+ }
+ return NULL;
+}
+
+/*
+ * run through the list of inodes in the FS that need
+ * defragging
+ */
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+{
+ struct inode_defrag *defrag;
+ struct btrfs_root *inode_root;
+ struct inode *inode;
+ struct rb_node *n;
+ struct btrfs_key key;
+ struct btrfs_ioctl_defrag_range_args range;
+ u64 first_ino = 0;
+ int num_defrag;
+ int defrag_batch = 1024;
+
+ memset(&range, 0, sizeof(range));
+ range.len = (u64)-1;
+
+ atomic_inc(&fs_info->defrag_running);
+ spin_lock(&fs_info->defrag_inodes_lock);
+	while (1) {
+ n = NULL;
+
+ /* find an inode to defrag */
+ defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
+ if (!defrag) {
+ if (n)
+ defrag = rb_entry(n, struct inode_defrag, rb_node);
+ else if (first_ino) {
+ first_ino = 0;
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ /* remove it from the rbtree */
+ first_ino = defrag->ino + 1;
+ rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
+
+ if (fs_info->closing)
+ goto next_free;
+
+ spin_unlock(&fs_info->defrag_inodes_lock);
+
+ /* get the inode */
+ key.objectid = defrag->root;
+ btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.offset = (u64)-1;
+ inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(inode_root))
+ goto next;
+
+ key.objectid = defrag->ino;
+ btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.offset = 0;
+
+ inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
+ if (IS_ERR(inode))
+ goto next;
+
+ /* do a chunk of defrag */
+ BTRFS_I(inode)->in_defrag = 0;
+ range.start = defrag->last_offset;
+ num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+ defrag_batch);
+ /*
+ * if we filled the whole defrag batch, there
+ * must be more work to do. Queue this defrag
+ * again
+ */
+ if (num_defrag == defrag_batch) {
+ defrag->last_offset = range.start;
+ __btrfs_add_inode_defrag(inode, defrag);
+			/*
+			 * we don't want to kfree defrag; we added it back to
+			 * the rbtree
+			 */
+ defrag = NULL;
+ } else if (defrag->last_offset && !defrag->cycled) {
+ /*
+ * we didn't fill our defrag batch, but
+ * we didn't start at zero. Make sure we loop
+ * around to the start of the file.
+ */
+ defrag->last_offset = 0;
+ defrag->cycled = 1;
+ __btrfs_add_inode_defrag(inode, defrag);
+ defrag = NULL;
+ }
+
+ iput(inode);
+next:
+ spin_lock(&fs_info->defrag_inodes_lock);
+next_free:
+ kfree(defrag);
+ }
+ spin_unlock(&fs_info->defrag_inodes_lock);
+
+ atomic_dec(&fs_info->defrag_running);
+
+ /*
+ * during unmount, we use the transaction_wait queue to
+ * wait for the defragger to stop
+ */
+ wake_up(&fs_info->transaction_wait);
+ return 0;
+}
/* simple helper to fault in pages and copy. This should go away
* and be replaced with calls into generic code.
@@ -191,9 +448,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
}
while (1) {
if (!split)
- split = alloc_extent_map(GFP_NOFS);
+ split = alloc_extent_map();
if (!split2)
- split2 = alloc_extent_map(GFP_NOFS);
+ split2 = alloc_extent_map();
BUG_ON(!split || !split2);
write_lock(&em_tree->lock);
@@ -298,6 +555,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key new_key;
+ u64 ino = btrfs_ino(inode);
u64 search_start = start;
u64 disk_bytenr = 0;
u64 num_bytes = 0;
@@ -318,14 +576,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
while (1) {
recow = 0;
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, ino,
search_start, -1);
if (ret < 0)
break;
if (ret > 0 && path->slots[0] > 0 && search_start == start) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
- if (key.objectid == inode->i_ino &&
+ if (key.objectid == ino &&
key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
@@ -346,7 +604,7 @@ next_slot:
}
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid > inode->i_ino ||
+ if (key.objectid > ino ||
key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
break;
@@ -376,7 +634,7 @@ next_slot:
search_start = max(key.offset, start);
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
continue;
}
@@ -393,7 +651,7 @@ next_slot:
ret = btrfs_duplicate_item(trans, root, path,
&new_key);
if (ret == -EAGAIN) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
continue;
}
if (ret < 0)
@@ -516,7 +774,7 @@ next_slot:
del_nr = 0;
del_slot = 0;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
continue;
}
@@ -592,6 +850,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
int del_slot = 0;
int recow;
int ret;
+ u64 ino = btrfs_ino(inode);
btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -600,7 +859,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
again:
recow = 0;
split = start;
- key.objectid = inode->i_ino;
+ key.objectid = ino;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = split;
@@ -612,8 +871,7 @@ again:
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- BUG_ON(key.objectid != inode->i_ino ||
- key.type != BTRFS_EXTENT_DATA_KEY);
+ BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
BUG_ON(btrfs_file_extent_type(leaf, fi) !=
@@ -630,7 +888,7 @@ again:
other_start = 0;
other_end = start;
if (extent_mergeable(leaf, path->slots[0] - 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
btrfs_set_item_key_safe(trans, root, path, &new_key);
@@ -653,7 +911,7 @@ again:
other_start = end;
other_end = 0;
if (extent_mergeable(leaf, path->slots[0] + 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -681,7 +939,7 @@ again:
new_key.offset = split;
ret = btrfs_duplicate_item(trans, root, path, &new_key);
if (ret == -EAGAIN) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
BUG_ON(ret < 0);
@@ -702,7 +960,7 @@ again:
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
- inode->i_ino, orig_offset);
+ ino, orig_offset);
BUG_ON(ret);
if (split == start) {
@@ -718,10 +976,10 @@ again:
other_start = end;
other_end = 0;
if (extent_mergeable(leaf, path->slots[0] + 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
extent_end = other_end;
@@ -729,16 +987,16 @@ again:
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
- inode->i_ino, orig_offset);
+ ino, orig_offset);
BUG_ON(ret);
}
other_start = 0;
other_end = start;
if (extent_mergeable(leaf, path->slots[0] - 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
key.offset = other_start;
@@ -746,7 +1004,7 @@ again:
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
- inode->i_ino, orig_offset);
+ ino, orig_offset);
BUG_ON(ret);
}
if (del_nr == 0) {
@@ -1375,7 +1633,7 @@ static long btrfs_fallocate(struct file *file, int mode,
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
- BUG_ON(IS_ERR(em) || !em);
+ BUG_ON(IS_ERR_OR_NULL(em));
last_byte = min(extent_map_end(em), alloc_end);
last_byte = (last_byte + mask) & ~mask;
if (em->block_start == EXTENT_MAP_HOLE ||
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 63731a1fb0a1..70d45795d758 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -25,18 +25,17 @@
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
+#include "inode-map.h"
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
-static void recalculate_thresholds(struct btrfs_block_group_cache
- *block_group);
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
-struct inode *lookup_free_space_inode(struct btrfs_root *root,
- struct btrfs_block_group_cache
- *block_group, struct btrfs_path *path)
+static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 offset)
{
struct btrfs_key key;
struct btrfs_key location;
@@ -46,22 +45,15 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
struct inode *inode = NULL;
int ret;
- spin_lock(&block_group->lock);
- if (block_group->inode)
- inode = igrab(block_group->inode);
- spin_unlock(&block_group->lock);
- if (inode)
- return inode;
-
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ERR_PTR(-ENOENT);
}
@@ -70,7 +62,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header);
btrfs_free_space_key(leaf, header, &disk_key);
btrfs_disk_key_to_cpu(&location, &disk_key);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
if (!inode)
@@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
inode->i_mapping->flags &= ~__GFP_FS;
+ return inode;
+}
+
+struct inode *lookup_free_space_inode(struct btrfs_root *root,
+ struct btrfs_block_group_cache
+ *block_group, struct btrfs_path *path)
+{
+ struct inode *inode = NULL;
+
+ spin_lock(&block_group->lock);
+ if (block_group->inode)
+ inode = igrab(block_group->inode);
+ spin_unlock(&block_group->lock);
+ if (inode)
+ return inode;
+
+ inode = __lookup_free_space_inode(root, path,
+ block_group->key.objectid);
+ if (IS_ERR(inode))
+ return inode;
+
spin_lock(&block_group->lock);
if (!root->fs_info->closing) {
block_group->inode = igrab(inode);
@@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
return inode;
}
-int create_free_space_inode(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_path *path)
+int __create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u64 ino, u64 offset)
{
struct btrfs_key key;
struct btrfs_disk_key disk_key;
struct btrfs_free_space_header *header;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
- u64 objectid;
int ret;
- ret = btrfs_find_free_objectid(trans, root, 0, &objectid);
- if (ret < 0)
- return ret;
-
- ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+ ret = btrfs_insert_empty_inode(trans, root, path, ino);
if (ret)
return ret;
@@ -131,19 +138,18 @@ int create_free_space_inode(struct btrfs_root *root,
BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
- btrfs_set_inode_block_group(leaf, inode_item,
- block_group->key.objectid);
+ btrfs_set_inode_block_group(leaf, inode_item, offset);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(struct btrfs_free_space_header));
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
leaf = path->nodes[0];
@@ -152,11 +158,27 @@ int create_free_space_inode(struct btrfs_root *root,
memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
+int create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path)
+{
+ int ret;
+ u64 ino;
+
+ ret = btrfs_find_free_objectid(root, &ino);
+ if (ret < 0)
+ return ret;
+
+ return __create_free_space_inode(root, trans, path, ino,
+ block_group->key.objectid);
+}
+
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_path *path,
@@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
return ret;
}
- return btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
+ return ret;
}
static int readahead_cache(struct inode *inode)
@@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode)
return 0;
}
-int load_free_space_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *block_group)
+int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_path *path, u64 offset)
{
- struct btrfs_root *root = fs_info->tree_root;
- struct inode *inode;
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct page *page;
- struct btrfs_path *path;
u32 *checksums = NULL, *crc;
char *disk_crcs = NULL;
struct btrfs_key key;
@@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
u64 num_entries;
u64 num_bitmaps;
u64 generation;
- u64 used = btrfs_block_group_used(&block_group->item);
u32 cur_crc = ~(u32)0;
pgoff_t index = 0;
unsigned long first_page_offset;
int num_checksums;
- int ret = 0;
-
- /*
- * If we're unmounting then just return, since this does a search on the
- * normal root and not the commit root and we could deadlock.
- */
- smp_mb();
- if (fs_info->closing)
- return 0;
-
- /*
- * If this block group has been marked to be cleared for one reason or
- * another then we can't trust the on disk cache, so just return.
- */
- spin_lock(&block_group->lock);
- if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
- spin_unlock(&block_group->lock);
- return 0;
- }
- spin_unlock(&block_group->lock);
+ int ret = 0, ret2;
INIT_LIST_HEAD(&bitmaps);
- path = btrfs_alloc_path();
- if (!path)
- return 0;
-
- inode = lookup_free_space_inode(root, block_group, path);
- if (IS_ERR(inode)) {
- btrfs_free_path(path);
- return 0;
- }
-
/* Nothing in the space cache, goodbye */
- if (!i_size_read(inode)) {
- btrfs_free_path(path);
+ if (!i_size_read(inode))
goto out;
- }
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret) {
- btrfs_free_path(path);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0) {
+ btrfs_release_path(path);
+ ret = 0;
goto out;
}
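+ /* assume failure until we make it all the way through */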
+ ret = -1;
+
leaf = path->nodes[0];
header = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_header);
num_entries = btrfs_free_space_entries(leaf, header);
num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
generation = btrfs_free_space_generation(leaf, header);
- btrfs_free_path(path);
+ btrfs_release_path(path);
if (BTRFS_I(inode)->generation != generation) {
printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
- " not match free space cache generation (%llu) for "
- "block group %llu\n",
+ " not match free space cache generation (%llu)\n",
(unsigned long long)BTRFS_I(inode)->generation,
- (unsigned long long)generation,
- (unsigned long long)block_group->key.objectid);
- goto free_cache;
+ (unsigned long long)generation);
+ goto out;
}
if (!num_entries)
@@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
goto out;
ret = readahead_cache(inode);
- if (ret) {
- ret = 0;
+ if (ret)
goto out;
- }
while (1) {
struct btrfs_free_space_entry *entry;
@@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
}
page = grab_cache_page(inode->i_mapping, index);
- if (!page) {
- ret = 0;
+ if (!page)
goto free_cache;
- }
if (!PageUptodate(page)) {
btrfs_readpage(NULL, page);
@@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
unlock_page(page);
page_cache_release(page);
printk(KERN_ERR "btrfs: error reading free "
- "space cache: %llu\n",
- (unsigned long long)
- block_group->key.objectid);
+ "space cache\n");
goto free_cache;
}
}
@@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
gen = addr + (sizeof(u32) * num_checksums);
if (*gen != BTRFS_I(inode)->generation) {
printk(KERN_ERR "btrfs: space cache generation"
- " (%llu) does not match inode (%llu) "
- "for block group %llu\n",
+ " (%llu) does not match inode (%llu)\n",
(unsigned long long)*gen,
(unsigned long long)
- BTRFS_I(inode)->generation,
- (unsigned long long)
- block_group->key.objectid);
+ BTRFS_I(inode)->generation);
kunmap(page);
unlock_page(page);
page_cache_release(page);
@@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
PAGE_CACHE_SIZE - start_offset);
btrfs_csum_final(cur_crc, (char *)&cur_crc);
if (cur_crc != *crc) {
- printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
- "block group %llu\n", index,
- (unsigned long long)block_group->key.objectid);
+ printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
+ index);
kunmap(page);
unlock_page(page);
page_cache_release(page);
@@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
}
if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
- spin_lock(&block_group->tree_lock);
- ret = link_free_space(block_group, e);
- spin_unlock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
+ ret = link_free_space(ctl, e);
+ spin_unlock(&ctl->tree_lock);
BUG_ON(ret);
} else {
e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
@@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
page_cache_release(page);
goto free_cache;
}
- spin_lock(&block_group->tree_lock);
- ret = link_free_space(block_group, e);
- block_group->total_bitmaps++;
- recalculate_thresholds(block_group);
- spin_unlock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
+ ret2 = link_free_space(ctl, e);
+ ctl->total_bitmaps++;
+ ctl->op->recalc_thresholds(ctl);
+ spin_unlock(&ctl->tree_lock);
list_add_tail(&e->list, &bitmaps);
}
@@ -471,41 +453,97 @@ next:
index++;
}
- spin_lock(&block_group->tree_lock);
- if (block_group->free_space != (block_group->key.offset - used -
- block_group->bytes_super)) {
- spin_unlock(&block_group->tree_lock);
- printk(KERN_ERR "block group %llu has an wrong amount of free "
- "space\n", block_group->key.objectid);
- ret = 0;
- goto free_cache;
- }
- spin_unlock(&block_group->tree_lock);
-
ret = 1;
out:
kfree(checksums);
kfree(disk_crcs);
- iput(inode);
return ret;
-
free_cache:
- /* This cache is bogus, make sure it gets cleared */
+ __btrfs_remove_free_space_cache(ctl);
+ goto out;
+}
+
+int load_free_space_cache(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group_cache *block_group)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct btrfs_root *root = fs_info->tree_root;
+ struct inode *inode;
+ struct btrfs_path *path;
+ int ret;
+ bool matched;
+ u64 used = btrfs_block_group_used(&block_group->item);
+
+ /*
+ * If we're unmounting then just return, since this does a search on the
+ * normal root and not the commit root and we could deadlock.
+ */
+ smp_mb();
+ if (fs_info->closing)
+ return 0;
+
+ /*
+ * If this block group has been marked to be cleared for one reason or
+ * another then we can't trust the on disk cache, so just return.
+ */
spin_lock(&block_group->lock);
- block_group->disk_cache_state = BTRFS_DC_CLEAR;
+ if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
+ spin_unlock(&block_group->lock);
+ return 0;
+ }
spin_unlock(&block_group->lock);
- btrfs_remove_free_space_cache(block_group);
- goto out;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return 0;
+
+ inode = lookup_free_space_inode(root, block_group, path);
+ if (IS_ERR(inode)) {
+ btrfs_free_path(path);
+ return 0;
+ }
+
+ ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
+ path, block_group->key.objectid);
+ btrfs_free_path(path);
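+ /* only a return of 1 means the cache was actually loaded */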
+ if (ret <= 0)
+ goto out;
+
+ spin_lock(&ctl->tree_lock);
+ matched = (ctl->free_space == (block_group->key.offset - used -
+ block_group->bytes_super));
+ spin_unlock(&ctl->tree_lock);
+
+ if (!matched) {
+ __btrfs_remove_free_space_cache(ctl);
+ printk(KERN_ERR "block group %llu has an wrong amount of free "
+ "space\n", block_group->key.objectid);
+ ret = -1;
+ }
+out:
+ if (ret < 0) {
+ /* This cache is bogus, make sure it gets cleared */
+ spin_lock(&block_group->lock);
+ block_group->disk_cache_state = BTRFS_DC_CLEAR;
+ spin_unlock(&block_group->lock);
+ ret = 0;
+
+ printk(KERN_ERR "btrfs: failed to load free space cache "
+ "for block group %llu\n", block_group->key.objectid);
+ }
+
+ iput(inode);
+ return ret;
}
-int btrfs_write_out_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_path *path)
+int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u64 offset)
{
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
- struct inode *inode;
struct rb_node *node;
struct list_head *pos, *n;
struct page **pages;
@@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
int index = 0, num_pages = 0;
int entries = 0;
int bitmaps = 0;
- int ret = 0;
+ int ret = -1;
bool next_page = false;
bool out_of_space = false;
- root = root->fs_info->tree_root;
-
INIT_LIST_HEAD(&bitmap_list);
- spin_lock(&block_group->lock);
- if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
- spin_unlock(&block_group->lock);
- return 0;
- }
- spin_unlock(&block_group->lock);
-
- inode = lookup_free_space_inode(root, block_group, path);
- if (IS_ERR(inode))
- return 0;
-
- if (!i_size_read(inode)) {
- iput(inode);
+ node = rb_first(&ctl->free_space_offset);
+ if (!node)
return 0;
- }
- node = rb_first(&block_group->free_space_offset);
- if (!node) {
- iput(inode);
- return 0;
- }
+ if (!i_size_read(inode))
+ return -1;
num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
@@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
/* We need a checksum per page. */
crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
- if (!crc) {
- iput(inode);
- return 0;
- }
+ if (!crc)
+ return -1;
pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
if (!pages) {
kfree(crc);
- iput(inode);
- return 0;
+ return -1;
}
/* Since the first page has all of our checksums and our generation we
@@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
/* Get the cluster for this block_group if it exists */
- if (!list_empty(&block_group->cluster_list))
+ if (block_group && !list_empty(&block_group->cluster_list))
cluster = list_entry(block_group->cluster_list.next,
struct btrfs_free_cluster,
block_group_list);
@@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
* When searching for pinned extents, we need to start at our start
* offset.
*/
- start = block_group->key.objectid;
+ if (block_group)
+ start = block_group->key.objectid;
/* Write out the extent entries */
do {
@@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
* We want to add any pinned extents to our free space cache
* so we don't leak the space
*/
- while (!next_page && (start < block_group->key.objectid +
- block_group->key.offset)) {
+ while (block_group && !next_page &&
+ (start < block_group->key.objectid +
+ block_group->key.offset)) {
ret = find_first_extent_bit(unpin, start, &start, &end,
EXTENT_DIRTY);
if (ret) {
@@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
filemap_write_and_wait(inode->i_mapping);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
if (ret < 0) {
- ret = 0;
+ ret = -1;
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -816,13 +836,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
path->slots[0]--;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
- found_key.offset != block_group->key.objectid) {
- ret = 0;
+ found_key.offset != offset) {
+ ret = -1;
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, NULL,
GFP_NOFS);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto out_free;
}
}
@@ -832,49 +852,83 @@ int btrfs_write_out_cache(struct btrfs_root *root,
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = 1;
out_free:
- if (ret == 0) {
+ if (ret != 1) {
invalidate_inode_pages2_range(inode->i_mapping, 0, index);
- spin_lock(&block_group->lock);
- block_group->disk_cache_state = BTRFS_DC_ERROR;
- spin_unlock(&block_group->lock);
BTRFS_I(inode)->generation = 0;
}
kfree(checksums);
kfree(pages);
btrfs_update_inode(trans, root, inode);
+ return ret;
+}
+
+int btrfs_write_out_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct inode *inode;
+ int ret = 0;
+
+ root = root->fs_info->tree_root;
+
+ spin_lock(&block_group->lock);
+ if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
+ spin_unlock(&block_group->lock);
+ return 0;
+ }
+ spin_unlock(&block_group->lock);
+
+ inode = lookup_free_space_inode(root, block_group, path);
+ if (IS_ERR(inode))
+ return 0;
+
+ ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+ path, block_group->key.objectid);
+ if (ret < 0) {
+ spin_lock(&block_group->lock);
+ block_group->disk_cache_state = BTRFS_DC_ERROR;
+ spin_unlock(&block_group->lock);
+ ret = 0;
+
+ printk(KERN_ERR "btrfs: failed to write free space cace "
+ "for block group %llu\n", block_group->key.objectid);
+ }
+
iput(inode);
return ret;
}
-static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
u64 offset)
{
BUG_ON(offset < bitmap_start);
offset -= bitmap_start;
- return (unsigned long)(div64_u64(offset, sectorsize));
+ return (unsigned long)(div_u64(offset, unit));
}
-static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
- return (unsigned long)(div64_u64(bytes, sectorsize));
+ return (unsigned long)(div_u64(bytes, unit));
}
-static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
u64 offset)
{
u64 bitmap_start;
u64 bytes_per_bitmap;
- bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
- bitmap_start = offset - block_group->key.objectid;
+ bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
+ bitmap_start = offset - ctl->start;
bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
bitmap_start *= bytes_per_bitmap;
- bitmap_start += block_group->key.objectid;
+ bitmap_start += ctl->start;
return bitmap_start;
}
@@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
* offset.
*/
static struct btrfs_free_space *
-tree_search_offset(struct btrfs_block_group_cache *block_group,
+tree_search_offset(struct btrfs_free_space_ctl *ctl,
u64 offset, int bitmap_only, int fuzzy)
{
- struct rb_node *n = block_group->free_space_offset.rb_node;
+ struct rb_node *n = ctl->free_space_offset.rb_node;
struct btrfs_free_space *entry, *prev = NULL;
/* find entry that is closest to the 'offset' */
@@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
break;
}
}
- if (entry->offset + BITS_PER_BITMAP *
- block_group->sectorsize > offset)
+ if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
return entry;
} else if (entry->offset + entry->bytes > offset)
return entry;
@@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
while (1) {
if (entry->bitmap) {
if (entry->offset + BITS_PER_BITMAP *
- block_group->sectorsize > offset)
+ ctl->unit > offset)
break;
} else {
if (entry->offset + entry->bytes > offset)
@@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
}
static inline void
-__unlink_free_space(struct btrfs_block_group_cache *block_group,
+__unlink_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
- rb_erase(&info->offset_index, &block_group->free_space_offset);
- block_group->free_extents--;
+ rb_erase(&info->offset_index, &ctl->free_space_offset);
+ ctl->free_extents--;
}
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
- __unlink_free_space(block_group, info);
- block_group->free_space -= info->bytes;
+ __unlink_free_space(ctl, info);
+ ctl->free_space -= info->bytes;
}
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
int ret = 0;
BUG_ON(!info->bitmap && !info->bytes);
- ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
+ ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
&info->offset_index, (info->bitmap != NULL));
if (ret)
return ret;
- block_group->free_space += info->bytes;
- block_group->free_extents++;
+ ctl->free_space += info->bytes;
+ ctl->free_extents++;
return ret;
}
-static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
+ struct btrfs_block_group_cache *block_group = ctl->private;
u64 max_bytes;
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
+ u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+ int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+
+ BUG_ON(ctl->total_bitmaps > max_bitmaps);
/*
* The goal is to keep the total amount of memory used per 1gb of space
@@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
* sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
* we add more bitmaps.
*/
- bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+ bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
if (bitmap_bytes >= max_bytes) {
- block_group->extents_thresh = 0;
+ ctl->extents_thresh = 0;
return;
}
@@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
extent_bytes = max_bytes - bitmap_bytes;
extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
- block_group->extents_thresh =
+ ctl->extents_thresh =
div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
-static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes)
{
- unsigned long start, end;
- unsigned long i;
+ unsigned long start, count;
- start = offset_to_bit(info->offset, block_group->sectorsize, offset);
- end = start + bytes_to_bits(bytes, block_group->sectorsize);
- BUG_ON(end > BITS_PER_BITMAP);
+ start = offset_to_bit(info->offset, ctl->unit, offset);
+ count = bytes_to_bits(bytes, ctl->unit);
+ BUG_ON(start + count > BITS_PER_BITMAP);
- for (i = start; i < end; i++)
- clear_bit(i, info->bitmap);
+ bitmap_clear(info->bitmap, start, count);
info->bytes -= bytes;
- block_group->free_space -= bytes;
+ ctl->free_space -= bytes;
}
-static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes)
{
- unsigned long start, end;
- unsigned long i;
+ unsigned long start, count;
- start = offset_to_bit(info->offset, block_group->sectorsize, offset);
- end = start + bytes_to_bits(bytes, block_group->sectorsize);
- BUG_ON(end > BITS_PER_BITMAP);
+ start = offset_to_bit(info->offset, ctl->unit, offset);
+ count = bytes_to_bits(bytes, ctl->unit);
+ BUG_ON(start + count > BITS_PER_BITMAP);
- for (i = start; i < end; i++)
- set_bit(i, info->bitmap);
+ bitmap_set(info->bitmap, start, count);
info->bytes += bytes;
- block_group->free_space += bytes;
+ ctl->free_space += bytes;
}
-static int search_bitmap(struct btrfs_block_group_cache *block_group,
+static int search_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info, u64 *offset,
u64 *bytes)
{
@@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
unsigned long bits, i;
unsigned long next_zero;
- i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+ i = offset_to_bit(bitmap_info->offset, ctl->unit,
max_t(u64, *offset, bitmap_info->offset));
- bits = bytes_to_bits(*bytes, block_group->sectorsize);
+ bits = bytes_to_bits(*bytes, ctl->unit);
for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
i < BITS_PER_BITMAP;
@@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
}
if (found_bits) {
- *offset = (u64)(i * block_group->sectorsize) +
- bitmap_info->offset;
- *bytes = (u64)(found_bits) * block_group->sectorsize;
+ *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
+ *bytes = (u64)(found_bits) * ctl->unit;
return 0;
}
return -1;
}
-static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
- *block_group, u64 *offset,
- u64 *bytes, int debug)
+static struct btrfs_free_space *
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
struct btrfs_free_space *entry;
struct rb_node *node;
int ret;
- if (!block_group->free_space_offset.rb_node)
+ if (!ctl->free_space_offset.rb_node)
return NULL;
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group, *offset),
- 0, 1);
+ entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
if (!entry)
return NULL;
@@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
continue;
if (entry->bitmap) {
- ret = search_bitmap(block_group, entry, offset, bytes);
+ ret = search_bitmap(ctl, entry, offset, bytes);
if (!ret)
return entry;
continue;
@@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
return NULL;
}
-static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset)
{
- u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
- int max_bitmaps = (int)div64_u64(block_group->key.offset +
- bytes_per_bg - 1, bytes_per_bg);
- BUG_ON(block_group->total_bitmaps >= max_bitmaps);
-
- info->offset = offset_to_bitmap(block_group, offset);
+ info->offset = offset_to_bitmap(ctl, offset);
info->bytes = 0;
- link_free_space(block_group, info);
- block_group->total_bitmaps++;
+ link_free_space(ctl, info);
+ ctl->total_bitmaps++;
- recalculate_thresholds(block_group);
+ ctl->op->recalc_thresholds(ctl);
}
-static void free_bitmap(struct btrfs_block_group_cache *block_group,
+static void free_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info)
{
- unlink_free_space(block_group, bitmap_info);
+ unlink_free_space(ctl, bitmap_info);
kfree(bitmap_info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
- block_group->total_bitmaps--;
- recalculate_thresholds(block_group);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
}
-static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info,
u64 *offset, u64 *bytes)
{
@@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
int ret;
again:
- end = bitmap_info->offset +
- (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+ end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
/*
* XXX - this can go away after a few releases.
@@ -1288,24 +1332,22 @@ again:
search_start = *offset;
search_bytes = *bytes;
search_bytes = min(search_bytes, end - search_start + 1);
- ret = search_bitmap(block_group, bitmap_info, &search_start,
- &search_bytes);
+ ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
BUG_ON(ret < 0 || search_start != *offset);
if (*offset > bitmap_info->offset && *offset + *bytes > end) {
- bitmap_clear_bits(block_group, bitmap_info, *offset,
- end - *offset + 1);
+ bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
*bytes -= end - *offset + 1;
*offset = end + 1;
} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
- bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+ bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
*bytes = 0;
}
if (*bytes) {
struct rb_node *next = rb_next(&bitmap_info->offset_index);
if (!bitmap_info->bytes)
- free_bitmap(block_group, bitmap_info);
+ free_bitmap(ctl, bitmap_info);
/*
* no entry after this bitmap, but we still have bytes to
@@ -1332,31 +1374,28 @@ again:
*/
search_start = *offset;
search_bytes = *bytes;
- ret = search_bitmap(block_group, bitmap_info, &search_start,
+ ret = search_bitmap(ctl, bitmap_info, &search_start,
&search_bytes);
if (ret < 0 || search_start != *offset)
return -EAGAIN;
goto again;
} else if (!bitmap_info->bytes)
- free_bitmap(block_group, bitmap_info);
+ free_bitmap(ctl, bitmap_info);
return 0;
}
-static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
- struct btrfs_free_space *info)
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
{
- struct btrfs_free_space *bitmap_info;
- int added = 0;
- u64 bytes, offset, end;
- int ret;
+ struct btrfs_block_group_cache *block_group = ctl->private;
/*
* If we are below the extents threshold then we can add this as an
* extent, and don't have to deal with the bitmap
*/
- if (block_group->free_extents < block_group->extents_thresh) {
+ if (ctl->free_extents < ctl->extents_thresh) {
/*
* If this block group has some small extents we don't want to
* use up all of our free slots in the cache with them, we want
@@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
* the overhead of a bitmap if we don't have to.
*/
if (info->bytes <= block_group->sectorsize * 4) {
- if (block_group->free_extents * 2 <=
- block_group->extents_thresh)
- return 0;
+ if (ctl->free_extents * 2 <= ctl->extents_thresh)
+ return false;
} else {
- return 0;
+ return false;
}
}
@@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
*/
if (BITS_PER_BITMAP * block_group->sectorsize >
block_group->key.offset)
- return 0;
+ return false;
+
+ return true;
+}
+
+static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ struct btrfs_free_space *bitmap_info;
+ int added = 0;
+ u64 bytes, offset, end;
+ int ret;
bytes = info->bytes;
offset = info->offset;
+ if (!ctl->op->use_bitmap(ctl, info))
+ return 0;
+
again:
- bitmap_info = tree_search_offset(block_group,
- offset_to_bitmap(block_group, offset),
+ bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!bitmap_info) {
BUG_ON(added);
goto new_bitmap;
}
- end = bitmap_info->offset +
- (u64)(BITS_PER_BITMAP * block_group->sectorsize);
+ end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
if (offset >= bitmap_info->offset && offset + bytes > end) {
- bitmap_set_bits(block_group, bitmap_info, offset,
- end - offset);
+ bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
bytes -= end - offset;
offset = end;
added = 0;
} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
- bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+ bitmap_set_bits(ctl, bitmap_info, offset, bytes);
bytes = 0;
} else {
BUG();
@@ -1417,19 +1466,19 @@ again:
new_bitmap:
if (info && info->bitmap) {
- add_new_bitmap(block_group, info, offset);
+ add_new_bitmap(ctl, info, offset);
added = 1;
info = NULL;
goto again;
} else {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
/* no pre-allocated info, allocate a new one */
if (!info) {
info = kmem_cache_zalloc(btrfs_free_space_cachep,
GFP_NOFS);
if (!info) {
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
ret = -ENOMEM;
goto out;
}
@@ -1437,7 +1486,7 @@ new_bitmap:
/* allocate the bitmap */
info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
if (!info->bitmap) {
ret = -ENOMEM;
goto out;
@@ -1455,7 +1504,7 @@ out:
return ret;
}
-bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, bool update_stat)
{
struct btrfs_free_space *left_info;
@@ -1469,18 +1518,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
* are adding, if there is remove that struct and add a new one to
* cover the entire range
*/
- right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+ right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
if (right_info && rb_prev(&right_info->offset_index))
left_info = rb_entry(rb_prev(&right_info->offset_index),
struct btrfs_free_space, offset_index);
else
- left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+ left_info = tree_search_offset(ctl, offset - 1, 0, 0);
if (right_info && !right_info->bitmap) {
if (update_stat)
- unlink_free_space(block_group, right_info);
+ unlink_free_space(ctl, right_info);
else
- __unlink_free_space(block_group, right_info);
+ __unlink_free_space(ctl, right_info);
info->bytes += right_info->bytes;
kmem_cache_free(btrfs_free_space_cachep, right_info);
merged = true;
@@ -1489,9 +1538,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
if (left_info && !left_info->bitmap &&
left_info->offset + left_info->bytes == offset) {
if (update_stat)
- unlink_free_space(block_group, left_info);
+ unlink_free_space(ctl, left_info);
else
- __unlink_free_space(block_group, left_info);
+ __unlink_free_space(ctl, left_info);
info->offset = left_info->offset;
info->bytes += left_info->bytes;
kmem_cache_free(btrfs_free_space_cachep, left_info);
@@ -1501,8 +1550,8 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
return merged;
}
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
- u64 offset, u64 bytes)
+int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+ u64 offset, u64 bytes)
{
struct btrfs_free_space *info;
int ret = 0;
@@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
info->offset = offset;
info->bytes = bytes;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
- if (try_merge_free_space(block_group, info, true))
+ if (try_merge_free_space(ctl, info, true))
goto link;
/*
@@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
* extent then we know we're going to have to allocate a new extent, so
* before we do that see if we need to drop this into a bitmap
*/
- ret = insert_into_bitmap(block_group, info);
+ ret = insert_into_bitmap(ctl, info);
if (ret < 0) {
goto out;
} else if (ret) {
@@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
goto out;
}
link:
- ret = link_free_space(block_group, info);
+ ret = link_free_space(ctl, info);
if (ret)
kmem_cache_free(btrfs_free_space_cachep, info);
out:
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
if (ret) {
printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
@@ -1549,21 +1598,21 @@ out:
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
struct btrfs_free_space *next_info = NULL;
int ret = 0;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
again:
- info = tree_search_offset(block_group, offset, 0, 0);
+ info = tree_search_offset(ctl, offset, 0, 0);
if (!info) {
/*
* oops didn't find an extent that matched the space we wanted
* to remove, look for a bitmap instead
*/
- info = tree_search_offset(block_group,
- offset_to_bitmap(block_group, offset),
+ info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!info) {
WARN_ON(1);
@@ -1578,8 +1627,8 @@ again:
offset_index);
if (next_info->bitmap)
- end = next_info->offset + BITS_PER_BITMAP *
- block_group->sectorsize - 1;
+ end = next_info->offset +
+ BITS_PER_BITMAP * ctl->unit - 1;
else
end = next_info->offset + next_info->bytes;
@@ -1599,20 +1648,20 @@ again:
}
if (info->bytes == bytes) {
- unlink_free_space(block_group, info);
+ unlink_free_space(ctl, info);
if (info->bitmap) {
kfree(info->bitmap);
- block_group->total_bitmaps--;
+ ctl->total_bitmaps--;
}
kmem_cache_free(btrfs_free_space_cachep, info);
goto out_lock;
}
if (!info->bitmap && info->offset == offset) {
- unlink_free_space(block_group, info);
+ unlink_free_space(ctl, info);
info->offset += bytes;
info->bytes -= bytes;
- link_free_space(block_group, info);
+ link_free_space(ctl, info);
goto out_lock;
}
@@ -1626,13 +1675,13 @@ again:
* first unlink the old info and then
* insert it again after the hole we're creating
*/
- unlink_free_space(block_group, info);
+ unlink_free_space(ctl, info);
if (offset + bytes < info->offset + info->bytes) {
u64 old_end = info->offset + info->bytes;
info->offset = offset + bytes;
info->bytes = old_end - info->offset;
- ret = link_free_space(block_group, info);
+ ret = link_free_space(ctl, info);
WARN_ON(ret);
if (ret)
goto out_lock;
@@ -1642,7 +1691,7 @@ again:
*/
kmem_cache_free(btrfs_free_space_cachep, info);
}
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
/* step two, insert a new info struct to cover
* anything before the hole
@@ -1653,12 +1702,12 @@ again:
goto out;
}
- ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+ ret = remove_from_bitmap(ctl, info, &offset, &bytes);
if (ret == -EAGAIN)
goto again;
BUG_ON(ret);
out_lock:
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
out:
return ret;
}
@@ -1666,11 +1715,12 @@ out:
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
struct rb_node *n;
int count = 0;
- for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
+ for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes)
count++;
@@ -1685,19 +1735,28 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
"\n", count);
}
-u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
+static struct btrfs_free_space_op free_space_op = {
+ .recalc_thresholds = recalculate_thresholds,
+ .use_bitmap = use_bitmap,
+};
+
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
- struct btrfs_free_space *info;
- struct rb_node *n;
- u64 ret = 0;
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
- for (n = rb_first(&block_group->free_space_offset); n;
- n = rb_next(n)) {
- info = rb_entry(n, struct btrfs_free_space, offset_index);
- ret += info->bytes;
- }
+ spin_lock_init(&ctl->tree_lock);
+ ctl->unit = block_group->sectorsize;
+ ctl->start = block_group->key.objectid;
+ ctl->private = block_group;
+ ctl->op = &free_space_op;
- return ret;
+ /*
+ * we only want to have 32k of ram per block group for keeping
+ * track of free space, and if we pass 1/2 of that we want to
+ * start converting things over to using bitmaps
+ */
+ ctl->extents_thresh = ((1024 * 32) / 2) /
+ sizeof(struct btrfs_free_space);
}
/*
@@ -1711,6 +1770,7 @@ __btrfs_return_cluster_to_free_space(
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry;
struct rb_node *node;
@@ -1732,8 +1792,8 @@ __btrfs_return_cluster_to_free_space(
bitmap = (entry->bitmap != NULL);
if (!bitmap)
- try_merge_free_space(block_group, entry, false);
- tree_insert_offset(&block_group->free_space_offset,
+ try_merge_free_space(ctl, entry, false);
+ tree_insert_offset(&ctl->free_space_offset,
entry->offset, &entry->offset_index, bitmap);
}
cluster->root = RB_ROOT;
@@ -1744,14 +1804,38 @@ out:
return 0;
}
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
struct btrfs_free_space *info;
struct rb_node *node;
+
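+ /* free every entry, dropping the lock now and then to reschedule */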
+ while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
+ info = rb_entry(node, struct btrfs_free_space, offset_index);
+ unlink_free_space(ctl, info);
+ kfree(info->bitmap);
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ if (need_resched()) {
+ spin_unlock(&ctl->tree_lock);
+ cond_resched();
+ spin_lock(&ctl->tree_lock);
+ }
+ }
+}
+
+void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
+{
+ spin_lock(&ctl->tree_lock);
+ __btrfs_remove_free_space_cache_locked(ctl);
+ spin_unlock(&ctl->tree_lock);
+}
+
+void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_cluster *cluster;
struct list_head *head;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
while ((head = block_group->cluster_list.next) !=
&block_group->cluster_list) {
cluster = list_entry(head, struct btrfs_free_cluster,
@@ -1760,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
WARN_ON(cluster->block_group != block_group);
__btrfs_return_cluster_to_free_space(block_group, cluster);
if (need_resched()) {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
cond_resched();
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
}
}
+ __btrfs_remove_free_space_cache_locked(ctl);
+ spin_unlock(&ctl->tree_lock);
- while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
- info = rb_entry(node, struct btrfs_free_space, offset_index);
- if (!info->bitmap) {
- unlink_free_space(block_group, info);
- kmem_cache_free(btrfs_free_space_cachep, info);
- } else {
- free_bitmap(block_group, info);
- }
-
- if (need_resched()) {
- spin_unlock(&block_group->tree_lock);
- cond_resched();
- spin_lock(&block_group->tree_lock);
- }
- }
-
- spin_unlock(&block_group->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes, u64 empty_size)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
u64 bytes_search = bytes + empty_size;
u64 ret = 0;
- spin_lock(&block_group->tree_lock);
- entry = find_free_space(block_group, &offset, &bytes_search, 0);
+ spin_lock(&ctl->tree_lock);
+ entry = find_free_space(ctl, &offset, &bytes_search);
if (!entry)
goto out;
ret = offset;
if (entry->bitmap) {
- bitmap_clear_bits(block_group, entry, offset, bytes);
+ bitmap_clear_bits(ctl, entry, offset, bytes);
if (!entry->bytes)
- free_bitmap(block_group, entry);
+ free_bitmap(ctl, entry);
} else {
- unlink_free_space(block_group, entry);
+ unlink_free_space(ctl, entry);
entry->offset += bytes;
entry->bytes -= bytes;
if (!entry->bytes)
kmem_cache_free(btrfs_free_space_cachep, entry);
else
- link_free_space(block_group, entry);
+ link_free_space(ctl, entry);
}
out:
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
return ret;
}
@@ -1830,6 +1900,7 @@ int btrfs_return_cluster_to_free_space(
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster)
{
+ struct btrfs_free_space_ctl *ctl;
int ret;
/* first, get a safe pointer to the block group */
@@ -1848,10 +1919,12 @@ int btrfs_return_cluster_to_free_space(
atomic_inc(&block_group->count);
spin_unlock(&cluster->lock);
+ ctl = block_group->free_space_ctl;
+
/* now return any extents the cluster had on it */
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
/* finally drop our ref */
btrfs_put_block_group(block_group);
@@ -1863,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *entry,
u64 bytes, u64 min_start)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
int err;
u64 search_start = cluster->window_start;
u64 search_bytes = bytes;
@@ -1871,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
search_start = min_start;
search_bytes = bytes;
- err = search_bitmap(block_group, entry, &search_start,
- &search_bytes);
+ err = search_bitmap(ctl, entry, &search_start, &search_bytes);
if (err)
return 0;
ret = search_start;
- bitmap_clear_bits(block_group, entry, ret, bytes);
+ bitmap_clear_bits(ctl, entry, ret, bytes);
return ret;
}
@@ -1891,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
struct rb_node *node;
u64 ret = 0;
@@ -1910,8 +1984,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
while(1) {
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
- struct rb_node *node;
-
node = rb_next(&entry->offset_index);
if (!node)
break;
@@ -1925,7 +1997,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
cluster, entry, bytes,
min_start);
if (ret == 0) {
- struct rb_node *node;
node = rb_next(&entry->offset_index);
if (!node)
break;
@@ -1951,20 +2022,20 @@ out:
if (!ret)
return 0;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
- block_group->free_space -= bytes;
+ ctl->free_space -= bytes;
if (entry->bytes == 0) {
- block_group->free_extents--;
+ ctl->free_extents--;
if (entry->bitmap) {
kfree(entry->bitmap);
- block_group->total_bitmaps--;
- recalculate_thresholds(block_group);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
}
kmem_cache_free(btrfs_free_space_cachep, entry);
}
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
return ret;
}
@@ -1974,6 +2045,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
unsigned long next_zero;
unsigned long i;
unsigned long search_bits;
@@ -2028,7 +2100,7 @@ again:
cluster->window_start = start * block_group->sectorsize +
entry->offset;
- rb_erase(&entry->offset_index, &block_group->free_space_offset);
+ rb_erase(&entry->offset_index, &ctl->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 1);
BUG_ON(ret);
@@ -2043,6 +2115,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *first = NULL;
struct btrfs_free_space *entry = NULL;
struct btrfs_free_space *prev = NULL;
@@ -2053,7 +2126,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
u64 max_extent;
u64 max_gap = 128 * 1024;
- entry = tree_search_offset(block_group, offset, 0, 1);
+ entry = tree_search_offset(ctl, offset, 0, 1);
if (!entry)
return -ENOSPC;
@@ -2119,7 +2192,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
if (entry->bitmap)
continue;
- rb_erase(&entry->offset_index, &block_group->free_space_offset);
+ rb_erase(&entry->offset_index, &ctl->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 0);
BUG_ON(ret);
@@ -2138,16 +2211,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry;
struct rb_node *node;
int ret = -ENOSPC;
- if (block_group->total_bitmaps == 0)
+ if (ctl->total_bitmaps == 0)
return -ENOSPC;
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group, offset),
- 0, 1);
+ entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
if (!entry)
return -ENOSPC;
@@ -2180,6 +2252,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
u64 min_bytes;
int ret;
@@ -2199,14 +2272,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
} else
min_bytes = max(bytes, (bytes + empty_size) >> 2);
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
/*
* If we know we don't have enough space to make a cluster don't even
* bother doing all the work to try and find one.
*/
- if (block_group->free_space < min_bytes) {
- spin_unlock(&block_group->tree_lock);
+ if (ctl->free_space < min_bytes) {
+ spin_unlock(&ctl->tree_lock);
return -ENOSPC;
}
@@ -2232,7 +2305,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
}
out:
spin_unlock(&cluster->lock);
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
return ret;
}
@@ -2253,6 +2326,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
struct btrfs_fs_info *fs_info = block_group->fs_info;
u64 bytes = 0;
@@ -2262,52 +2336,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
*trimmed = 0;
while (start < end) {
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
- if (block_group->free_space < minlen) {
- spin_unlock(&block_group->tree_lock);
+ if (ctl->free_space < minlen) {
+ spin_unlock(&ctl->tree_lock);
break;
}
- entry = tree_search_offset(block_group, start, 0, 1);
+ entry = tree_search_offset(ctl, start, 0, 1);
if (!entry)
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group,
- start),
+ entry = tree_search_offset(ctl,
+ offset_to_bitmap(ctl, start),
1, 1);
if (!entry || entry->offset >= end) {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
break;
}
if (entry->bitmap) {
- ret = search_bitmap(block_group, entry, &start, &bytes);
+ ret = search_bitmap(ctl, entry, &start, &bytes);
if (!ret) {
if (start >= end) {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
break;
}
bytes = min(bytes, end - start);
- bitmap_clear_bits(block_group, entry,
- start, bytes);
+ bitmap_clear_bits(ctl, entry, start, bytes);
if (entry->bytes == 0)
- free_bitmap(block_group, entry);
+ free_bitmap(ctl, entry);
} else {
start = entry->offset + BITS_PER_BITMAP *
block_group->sectorsize;
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
ret = 0;
continue;
}
} else {
start = entry->offset;
bytes = min(entry->bytes, end - start);
- unlink_free_space(block_group, entry);
+ unlink_free_space(ctl, entry);
kmem_cache_free(btrfs_free_space_cachep, entry);
}
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
if (bytes >= minlen) {
int update_ret;
@@ -2319,8 +2391,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
bytes,
&actually_trimmed);
- btrfs_add_free_space(block_group,
- start, bytes);
+ btrfs_add_free_space(block_group, start, bytes);
if (!update_ret)
btrfs_update_reserved_bytes(block_group,
bytes, 0, 1);
@@ -2342,3 +2413,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
return ret;
}
+
+/*
+ * Find the left-most item in the cache tree, and then return the
+ * smallest inode number in the item.
+ *
+ * Note: the returned inode number may not be the smallest one in
+ * the tree, if the left-most item is a bitmap.
+ */
+u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
+{
+ struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
+ struct btrfs_free_space *entry = NULL;
+ u64 ino = 0;
+
+ spin_lock(&ctl->tree_lock);
+
+ if (RB_EMPTY_ROOT(&ctl->free_space_offset))
+ goto out;
+
+ entry = rb_entry(rb_first(&ctl->free_space_offset),
+ struct btrfs_free_space, offset_index);
+
+ if (!entry->bitmap) {
+ ino = entry->offset;
+
+ unlink_free_space(ctl, entry);
+ entry->offset++;
+ entry->bytes--;
+ if (!entry->bytes)
+ kmem_cache_free(btrfs_free_space_cachep, entry);
+ else
+ link_free_space(ctl, entry);
+ } else {
+ u64 offset = 0;
+ u64 count = 1;
+ int ret;
+
+ ret = search_bitmap(ctl, entry, &offset, &count);
+ BUG_ON(ret);
+
+ ino = offset;
+ bitmap_clear_bits(ctl, entry, offset, 1);
+ if (entry->bytes == 0)
+ free_bitmap(ctl, entry);
+ }
+out:
+ spin_unlock(&ctl->tree_lock);
+
+ return ino;
+}
+
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+ struct btrfs_path *path)
+{
+ struct inode *inode = NULL;
+
+ spin_lock(&root->cache_lock);
+ if (root->cache_inode)
+ inode = igrab(root->cache_inode);
+ spin_unlock(&root->cache_lock);
+ if (inode)
+ return inode;
+
+ inode = __lookup_free_space_inode(root, path, 0);
+ if (IS_ERR(inode))
+ return inode;
+
+ spin_lock(&root->cache_lock);
+ if (!root->fs_info->closing)
+ root->cache_inode = igrab(inode);
+ spin_unlock(&root->cache_lock);
+
+ return inode;
+}
+
+int create_free_ino_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path)
+{
+ return __create_free_space_inode(root, trans, path,
+ BTRFS_FREE_INO_OBJECTID, 0);
+}
+
+int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_path *path;
+ struct inode *inode;
+ int ret = 0;
+ u64 root_gen = btrfs_root_generation(&root->root_item);
+
+ /*
+ * If we're unmounting then just return, since this does a search on the
+ * normal root and not the commit root and we could deadlock.
+ */
+ smp_mb();
+ if (fs_info->closing)
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return 0;
+
+ inode = lookup_free_ino_inode(root, path);
+ if (IS_ERR(inode))
+ goto out;
+
+ if (root_gen != BTRFS_I(inode)->generation)
+ goto out_put;
+
+ ret = __load_free_space_cache(root, inode, ctl, path, 0);
+
+ if (ret < 0)
+ printk(KERN_ERR "btrfs: failed to load free ino cache for "
+ "root %llu\n", root->root_key.objectid);
+out_put:
+ iput(inode);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct inode *inode;
+ int ret;
+
+ inode = lookup_free_ino_inode(root, path);
+ if (IS_ERR(inode))
+ return 0;
+
+ ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
+ if (ret < 0)
+ printk(KERN_ERR "btrfs: failed to write free ino cache "
+ "for root %llu\n", root->root_key.objectid);
+
+ iput(inode);
+ return ret;
+}
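The functions above make the free-space cache machinery serve a second consumer: an on-disk cache of free inode numbers stored under BTRFS_FREE_INO_OBJECTID. A minimal sketch of the intended round trip, assuming a root whose free_ino_ctl is already set up (the wrapper function and its name are illustrative only, not part of this patch):

    /* Illustrative only: load the persisted free-ino cache, and later
     * write the in-memory ctl back through the cache inode. */
    static void free_ino_cache_roundtrip(struct btrfs_fs_info *fs_info,
                                         struct btrfs_root *root,
                                         struct btrfs_trans_handle *trans,
                                         struct btrfs_path *path)
    {
            int ret;

            /* returns 1 only when the on-disk cache was valid and loaded */
            ret = load_free_ino_cache(fs_info, root);
            if (ret != 1)
                    return;         /* fall back to scanning the fs tree */

            /* ... hand out inode numbers from root->free_ino_ctl ... */

            ret = btrfs_write_out_ino_cache(root, trans, path);
            if (ret < 0)
                    return;         /* the error is already logged above */
    }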
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 65c3b935289f..8f2613f779ed 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -27,6 +27,25 @@ struct btrfs_free_space {
struct list_head list;
};
+struct btrfs_free_space_ctl {
+ spinlock_t tree_lock;
+ struct rb_root free_space_offset;
+ u64 free_space;
+ int extents_thresh;
+ int free_extents;
+ int total_bitmaps;
+ int unit;
+ u64 start;
+ struct btrfs_free_space_op *op;
+ void *private;
+};
+
+struct btrfs_free_space_op {
+ void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
+ bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info);
+};
+
struct inode *lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_block_group_cache
*block_group, struct btrfs_path *path);
@@ -45,17 +64,38 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
- u64 bytenr, u64 size);
+
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+ struct btrfs_path *path);
+int create_free_ino_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path);
+int load_free_ino_cache(struct btrfs_fs_info *fs_info,
+ struct btrfs_root *root);
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path);
+
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
+int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+ u64 bytenr, u64 size);
+static inline int
+btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+ u64 bytenr, u64 size)
+{
+ return __btrfs_add_free_space(block_group->free_space_ctl,
+ bytenr, size);
+}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
u64 bytenr, u64 size);
+void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
- *block_group);
+ *block_group);
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes, u64 empty_size);
+u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
-u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 64f1150bb48d..baa74f3db691 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -130,7 +130,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_size - (ptr + sub_item_len - item_start));
ret = btrfs_truncate_item(trans, root, path,
item_size - sub_item_len, 1);
- BUG_ON(ret);
out:
btrfs_free_path(path);
return ret;
@@ -167,7 +166,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
ret = btrfs_extend_item(trans, root, path, ins_len);
- BUG_ON(ret);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c05a08f4c411..3262cd17a12f 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -16,11 +16,446 @@
* Boston, MA 021110-1307, USA.
*/
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+
#include "ctree.h"
#include "disk-io.h"
+#include "free-space-cache.h"
+#include "inode-map.h"
#include "transaction.h"
-int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
+static int caching_kthread(void *data)
+{
+ struct btrfs_root *root = data;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ u64 last = (u64)-1;
+ int slot;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /* Since the commit root is read-only, we can safely skip locking. */
+ path->skip_locking = 1;
+ path->search_commit_root = 1;
+ path->reada = 2;
+
+ key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ key.offset = 0;
+ key.type = BTRFS_INODE_ITEM_KEY;
+again:
+ /* need to make sure the commit_root doesn't disappear */
+ mutex_lock(&root->fs_commit_mutex);
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ while (1) {
+ smp_mb();
+ if (fs_info->closing)
+ goto out;
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0)
+ break;
+
+ if (need_resched() ||
+ btrfs_transaction_in_commit(fs_info)) {
+ leaf = path->nodes[0];
+
+ if (btrfs_header_nritems(leaf) == 0) {
+ WARN_ON(1);
+ break;
+ }
+
+ /*
+ * Save the key so we can advances forward
+ * in the next search.
+ */
+ btrfs_item_key_to_cpu(leaf, &key, 0);
+ btrfs_release_path(path);
+ root->cache_progress = last;
+ mutex_unlock(&root->fs_commit_mutex);
+ schedule_timeout(1);
+ goto again;
+ } else
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
+ if (key.type != BTRFS_INODE_ITEM_KEY)
+ goto next;
+
+ if (key.objectid >= root->highest_objectid)
+ break;
+
+ if (last != (u64)-1 && last + 1 != key.objectid) {
+ __btrfs_add_free_space(ctl, last + 1,
+ key.objectid - last - 1);
+ wake_up(&root->cache_wait);
+ }
+
+ last = key.objectid;
+next:
+ path->slots[0]++;
+ }
+
+ if (last < root->highest_objectid - 1) {
+ __btrfs_add_free_space(ctl, last + 1,
+ root->highest_objectid - last - 1);
+ }
+
+ spin_lock(&root->cache_lock);
+ root->cached = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->cache_lock);
+
+ root->cache_progress = (u64)-1;
+ btrfs_unpin_free_ino(root);
+out:
+ wake_up(&root->cache_wait);
+ mutex_unlock(&root->fs_commit_mutex);
+
+ btrfs_free_path(path);
+
+ return ret;
+}
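The gap arithmetic in the scan loop above is easy to sanity-check with a concrete case (the values are made up for illustration):

    /*
     * Example: the previous inode item had objectid last = 260 and the
     * current one has key.objectid = 266.  The numbers 261..265 are free,
     * so the thread adds the range starting at last + 1 = 261 with length
     * key.objectid - last - 1 = 266 - 260 - 1 = 5.
     */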
+
+static void start_caching(struct btrfs_root *root)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct task_struct *tsk;
+ int ret;
+ u64 objectid;
+
+ spin_lock(&root->cache_lock);
+ if (root->cached != BTRFS_CACHE_NO) {
+ spin_unlock(&root->cache_lock);
+ return;
+ }
+
+ root->cached = BTRFS_CACHE_STARTED;
+ spin_unlock(&root->cache_lock);
+
+ ret = load_free_ino_cache(root->fs_info, root);
+ if (ret == 1) {
+ spin_lock(&root->cache_lock);
+ root->cached = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->cache_lock);
+ return;
+ }
+
+ /*
+ * It can be quite time-consuming to fill the cache by searching
+ * through the extent tree, and this can keep the ino allocation path
+ * waiting. Therefore, at the start, we quickly find out the highest
+ * inode number, so we know we can use the inode numbers which fall in
+ * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
+ */
+ ret = btrfs_find_free_objectid(root, &objectid);
+ if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
+ __btrfs_add_free_space(ctl, objectid,
+ BTRFS_LAST_FREE_OBJECTID - objectid + 1);
+ }
+
+ tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
+ root->root_key.objectid);
+ BUG_ON(IS_ERR(tsk));
+}
+
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+{
+again:
+ *objectid = btrfs_find_ino_for_alloc(root);
+
+ if (*objectid != 0)
+ return 0;
+
+ start_caching(root);
+
+ wait_event(root->cache_wait,
+ root->cached == BTRFS_CACHE_FINISHED ||
+ root->free_ino_ctl->free_space > 0);
+
+ if (root->cached == BTRFS_CACHE_FINISHED &&
+ root->free_ino_ctl->free_space == 0)
+ return -ENOSPC;
+ else
+ goto again;
+}
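btrfs_find_free_ino() either hands out a cached number immediately or blocks until the caching thread has produced one; a caller that ends up not using the number is expected to give it back. A hedged caller-side sketch (do_create_inode is a hypothetical helper, not a btrfs function):

    static int create_with_cached_ino(struct btrfs_root *root)
    {
            u64 objectid;
            int ret;

            ret = btrfs_find_free_ino(root, &objectid);
            if (ret)
                    return ret;     /* -ENOSPC: inode number space exhausted */

            ret = do_create_inode(root, objectid);  /* hypothetical helper */
            if (ret)
                    btrfs_return_ino(root, objectid);
            return ret;
    }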
+
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+again:
+ if (root->cached == BTRFS_CACHE_FINISHED) {
+ __btrfs_add_free_space(ctl, objectid, 1);
+ } else {
+ /*
+ * If we are in the process of caching free ino chunks, we leave
+ * the inode number in the pinned tree until a transaction is
+ * committed or the caching work is done, to avoid adding the same
+ * number to the free_ino tree twice across transactions.
+ */
+
+ mutex_lock(&root->fs_commit_mutex);
+ spin_lock(&root->cache_lock);
+ if (root->cached == BTRFS_CACHE_FINISHED) {
+ spin_unlock(&root->cache_lock);
+ mutex_unlock(&root->fs_commit_mutex);
+ goto again;
+ }
+ spin_unlock(&root->cache_lock);
+
+ start_caching(root);
+
+ if (objectid <= root->cache_progress ||
+ objectid > root->highest_objectid)
+ __btrfs_add_free_space(ctl, objectid, 1);
+ else
+ __btrfs_add_free_space(pinned, objectid, 1);
+
+ mutex_unlock(&root->fs_commit_mutex);
+ }
+}
+
+/*
+ * When a transaction is committed, we'll move those inode numbers which
+ * are smaller than root->cache_progress from the pinned tree to the free_ino tree,
+ * and others will just be dropped, because the commit root we were
+ * searching has changed.
+ *
+ * Must be called with root->fs_commit_mutex held
+ */
+void btrfs_unpin_free_ino(struct btrfs_root *root)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+ struct btrfs_free_space *info;
+ struct rb_node *n;
+ u64 count;
+
+ while (1) {
+ n = rb_first(rbroot);
+ if (!n)
+ break;
+
+ info = rb_entry(n, struct btrfs_free_space, offset_index);
+ BUG_ON(info->bitmap);
+
+ if (info->offset > root->cache_progress)
+ goto free;
+ else if (info->offset + info->bytes > root->cache_progress)
+ count = root->cache_progress - info->offset + 1;
+ else
+ count = info->bytes;
+
+ __btrfs_add_free_space(ctl, info->offset, count);
+free:
+ rb_erase(&info->offset_index, rbroot);
+ kfree(info);
+ }
+}
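The three-way decision in the loop above can be restated as a pure function over one pinned extent; this helper is only an illustration of the arithmetic, not code from the patch:

    /* How much of the pinned extent [offset, offset + bytes) may be moved
     * back into the free_ino tree, given how far the caching thread got. */
    static u64 unpinnable_count(u64 offset, u64 bytes, u64 cache_progress)
    {
            if (offset > cache_progress)
                    return 0;                           /* drop it entirely */
            if (offset + bytes > cache_progress)
                    return cache_progress - offset + 1; /* scanned prefix only */
            return bytes;                               /* fully scanned */
    }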
+
+#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+
+/*
+ * The goal is to keep the memory used by the free_ino tree from
+ * exceeding the memory we would use if we only stored bitmaps.
+ */
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+ struct btrfs_free_space *info;
+ struct rb_node *n;
+ int max_ino;
+ int max_bitmaps;
+
+ n = rb_last(&ctl->free_space_offset);
+ if (!n) {
+ ctl->extents_thresh = INIT_THRESHOLD;
+ return;
+ }
+ info = rb_entry(n, struct btrfs_free_space, offset_index);
+
+ /*
+ * Find the maximum inode number in the filesystem. Note we
+ * ignore the fact that this can be a bitmap, because we are
+ * not doing a precise calculation.
+ */
+ max_ino = info->bytes - 1;
+
+ max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
+ if (max_bitmaps <= ctl->total_bitmaps) {
+ ctl->extents_thresh = 0;
+ return;
+ }
+
+ ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
+ PAGE_CACHE_SIZE / sizeof(*info);
+}
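A worked example of the threshold calculation, under the assumptions that PAGE_CACHE_SIZE is 4096 bytes and sizeof(struct btrfs_free_space) is on the order of 64 bytes on a 64-bit build (both depend on the configuration):

    /*
     * With those assumed sizes, INODES_PER_BITMAP = 4096 * 8 = 32768 and
     * INIT_THRESHOLD = (32 * 1024 / 2) / 64 = 256.  Suppose the last
     * entry gives max_ino = 100000 and one bitmap is already in use:
     *
     *   max_bitmaps    = ALIGN(100000, 32768) / 32768 = 131072 / 32768 = 4
     *   extents_thresh = (4 - 1) * 4096 / 64 = 192
     *
     * i.e. extent entries are allowed to consume roughly the pages that
     * have not yet been claimed by bitmaps.
     */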
+
+/*
+ * We don't fall back to a bitmap if we are below the extents threshold
+ * or this chunk of inode numbers is a big one.
+ */
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ if (ctl->free_extents < ctl->extents_thresh ||
+ info->bytes > INODES_PER_BITMAP / 10)
+ return false;
+
+ return true;
+}
+
+static struct btrfs_free_space_op free_ino_op = {
+ .recalc_thresholds = recalculate_thresholds,
+ .use_bitmap = use_bitmap,
+};
+
+static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+}
+
+static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ /*
+ * We always use extents for two reasons:
+ *
+ * - The pinned tree is only used while the caching work is in
+ * progress.
+ * - It keeps the code simpler. See btrfs_unpin_free_ino().
+ */
+ return false;
+}
+
+static struct btrfs_free_space_op pinned_free_ino_op = {
+ .recalc_thresholds = pinned_recalc_thresholds,
+ .use_bitmap = pinned_use_bitmap,
+};
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+ spin_lock_init(&ctl->tree_lock);
+ ctl->unit = 1;
+ ctl->start = 0;
+ ctl->private = NULL;
+ ctl->op = &free_ino_op;
+
+ /*
+ * Initially we allow using 16K of RAM to cache chunks of
+ * inode numbers before we resort to bitmaps. This is somewhat
+ * arbitrary, but it will be adjusted at runtime.
+ */
+ ctl->extents_thresh = INIT_THRESHOLD;
+
+ spin_lock_init(&pinned->tree_lock);
+ pinned->unit = 1;
+ pinned->start = 0;
+ pinned->private = NULL;
+ pinned->extents_thresh = 0;
+ pinned->op = &pinned_free_ino_op;
+}
+
+int btrfs_save_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_path *path;
+ struct inode *inode;
+ u64 alloc_hint = 0;
+ int ret;
+ int prealloc;
+ bool retry = false;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+again:
+ inode = lookup_free_ino_inode(root, path);
+ if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ if (IS_ERR(inode)) {
+ BUG_ON(retry);
+ retry = true;
+
+ ret = create_free_ino_inode(root, trans, path);
+ if (ret)
+ goto out;
+ goto again;
+ }
+
+ BTRFS_I(inode)->generation = 0;
+ ret = btrfs_update_inode(trans, root, inode);
+ WARN_ON(ret);
+
+ if (i_size_read(inode) > 0) {
+ ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+ if (ret)
+ goto out_put;
+ }
+
+ spin_lock(&root->cache_lock);
+ if (root->cached != BTRFS_CACHE_FINISHED) {
+ ret = -1;
+ spin_unlock(&root->cache_lock);
+ goto out_put;
+ }
+ spin_unlock(&root->cache_lock);
+
+ spin_lock(&ctl->tree_lock);
+ prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+ prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+ prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+ spin_unlock(&ctl->tree_lock);
+
+ /* Just to make sure we have enough space */
+ prealloc += 8 * PAGE_CACHE_SIZE;
+
+ ret = btrfs_check_data_free_space(inode, prealloc);
+ if (ret)
+ goto out_put;
+
+ ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+ prealloc, prealloc, &alloc_hint);
+ if (ret)
+ goto out_put;
+ btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+ iput(inode);
+out:
+ if (ret == 0)
+ ret = btrfs_write_out_ino_cache(root, trans, path);
+
+ btrfs_free_path(path);
+ return ret;
+}
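The preallocation sizing above can be checked with a small example, again assuming 4096-byte pages and a struct btrfs_free_space of roughly 64 bytes:

    /*
     * Say the ctl holds 200 extent entries and 2 bitmaps:
     *
     *   prealloc  = 200 * 64 = 12800    -> ALIGN to 4096 -> 16384
     *   prealloc += 2 * 4096            -> 24576
     *   prealloc += 8 * 4096 (slack)    -> 57344 bytes, i.e. 14 pages
     *
     * which is what gets reserved and preallocated for the cache inode
     * before the ctl is written out.
     */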
+
+static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
struct btrfs_path *path;
int ret;
@@ -55,15 +490,14 @@ error:
return ret;
}
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 dirid, u64 *objectid)
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
int ret;
mutex_lock(&root->objectid_mutex);
if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
- ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+ ret = btrfs_find_highest_objectid(root,
+ &root->highest_objectid);
if (ret)
goto out;
}
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
new file mode 100644
index 000000000000..ddb347bfee23
--- /dev/null
+++ b/fs/btrfs/inode-map.h
@@ -0,0 +1,13 @@
+#ifndef __BTRFS_INODE_MAP
+#define __BTRFS_INODE_MAP
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root);
+void btrfs_unpin_free_ino(struct btrfs_root *root);
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
+int btrfs_save_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans);
+
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
+
+#endif
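Taken together, the new header describes the whole life cycle of the per-root inode-number cache. A hedged sketch of the expected call ordering (in the real code these calls live in different places; they are gathered into one function here purely for illustration, with error handling omitted):

    static void free_ino_lifecycle(struct btrfs_root *root,
                                   struct btrfs_trans_handle *trans)
    {
            u64 objectid;

            btrfs_init_free_ino_ctl(root);              /* root read in */

            if (!btrfs_find_free_ino(root, &objectid))  /* may start caching */
                    btrfs_return_ino(root, objectid);   /* if it goes unused */

            btrfs_save_ino_cache(root, trans);          /* at commit time */
            btrfs_unpin_free_ino(root);                 /* fs_commit_mutex held */
    }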
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7cd8ab0ef04d..39a9d5750efd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -37,6 +37,7 @@
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -51,6 +52,7 @@
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
+#include "inode-map.h"
struct btrfs_iget_args {
u64 ino;
@@ -138,7 +140,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
path->leave_spinning = 1;
btrfs_set_trans_block_group(trans, inode);
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
key.offset = start;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(cur_size);
@@ -340,6 +342,10 @@ static noinline int compress_file_range(struct inode *inode,
int will_compress;
int compress_type = root->fs_info->compress_type;
+ /* if this is a small write inside eof, kick off a defrag */
+ if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
+ btrfs_add_inode_defrag(NULL, inode);
+
actual_end = min_t(u64, isize, end + 1);
again:
will_compress = 0;
@@ -649,7 +655,7 @@ retry:
async_extent->start +
async_extent->ram_size - 1, 0);
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
BUG_ON(!em);
em->start = async_extent->start;
em->len = async_extent->ram_size;
@@ -745,6 +751,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
return alloc_hint;
}
+static inline bool is_free_space_inode(struct btrfs_root *root,
+ struct inode *inode)
+{
+ if (root == root->fs_info->tree_root ||
+ BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+ return true;
+ return false;
+}
+
/*
* when extent_io.c finds a delayed allocation range in the file,
* the call backs end up in this code. The basic idea is to
@@ -777,7 +792,7 @@ static noinline int cow_file_range(struct inode *inode,
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
- BUG_ON(root == root->fs_info->tree_root);
+ BUG_ON(is_free_space_inode(root, inode));
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
btrfs_set_trans_block_group(trans, inode);
@@ -788,6 +803,10 @@ static noinline int cow_file_range(struct inode *inode,
disk_num_bytes = num_bytes;
ret = 0;
+ /* if this is a small write inside eof, kick off defrag */
+ if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
+ btrfs_add_inode_defrag(trans, inode);
+
if (start == 0) {
/* lets try to make an inline extent */
ret = cow_file_range_inline(trans, root, inode,
@@ -826,7 +845,7 @@ static noinline int cow_file_range(struct inode *inode,
(u64)-1, &ins, 1);
BUG_ON(ret);
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
BUG_ON(!em);
em->start = start;
em->orig_start = em->start;
@@ -1008,7 +1027,7 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
LIST_HEAD(list);
ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
- bytenr + num_bytes - 1, &list);
+ bytenr + num_bytes - 1, &list, 0);
if (ret == 0 && list_empty(&list))
return 0;
@@ -1049,29 +1068,31 @@ static noinline int run_delalloc_nocow(struct inode *inode,
int type;
int nocow;
int check_prev = 1;
- bool nolock = false;
+ bool nolock;
+ u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
BUG_ON(!path);
- if (root == root->fs_info->tree_root) {
- nolock = true;
+
+ nolock = is_free_space_inode(root, inode);
+
+ if (nolock)
trans = btrfs_join_transaction_nolock(root, 1);
- } else {
+ else
trans = btrfs_join_transaction(root, 1);
- }
BUG_ON(IS_ERR(trans));
cow_start = (u64)-1;
cur_offset = start;
while (1) {
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, ino,
cur_offset, 0);
BUG_ON(ret < 0);
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
path->slots[0] - 1);
- if (found_key.objectid == inode->i_ino &&
+ if (found_key.objectid == ino &&
found_key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
@@ -1092,7 +1113,7 @@ next_slot:
num_bytes = 0;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid > inode->i_ino ||
+ if (found_key.objectid > ino ||
found_key.type > BTRFS_EXTENT_DATA_KEY ||
found_key.offset > end)
break;
@@ -1127,7 +1148,7 @@ next_slot:
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
- if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+ if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
extent_offset, disk_bytenr))
goto out_check;
@@ -1164,7 +1185,7 @@ out_check:
goto next_slot;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start,
found_key.offset - 1, page_started,
@@ -1177,7 +1198,7 @@ out_check:
struct extent_map *em;
struct extent_map_tree *em_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
BUG_ON(!em);
em->start = cur_offset;
em->orig_start = em->start;
@@ -1222,7 +1243,7 @@ out_check:
if (cur_offset > end)
break;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
@@ -1310,14 +1331,13 @@ static int btrfs_set_bit_hook(struct inode *inode,
/*
* set_bit and clear bit hooks normally require _irqsave/restore
- * but in this case, we are only testeing for the DELALLOC
+ * but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- int do_list = (root->root_key.objectid !=
- BTRFS_ROOT_TREE_OBJECTID);
+ bool do_list = !is_free_space_inode(root, inode);
if (*bits & EXTENT_FIRST_DELALLOC)
*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1344,14 +1364,13 @@ static int btrfs_clear_bit_hook(struct inode *inode,
{
/*
* set_bit and clear bit hooks normally require _irqsave/restore
- * but in this case, we are only testeing for the DELALLOC
+ * but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- int do_list = (root->root_key.objectid !=
- BTRFS_ROOT_TREE_OBJECTID);
+ bool do_list = !is_free_space_inode(root, inode);
if (*bits & EXTENT_FIRST_DELALLOC)
*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1458,7 +1477,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- if (root == root->fs_info->tree_root)
+ if (is_free_space_inode(root, inode))
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
else
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -1644,7 +1663,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
&hint, 0);
BUG_ON(ret);
- ins.objectid = inode->i_ino;
+ ins.objectid = btrfs_ino(inode);
ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
@@ -1675,7 +1694,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
- inode->i_ino, file_pos, &ins);
+ btrfs_ino(inode), file_pos, &ins);
BUG_ON(ret);
btrfs_free_path(path);
@@ -1701,7 +1720,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
struct extent_state *cached_state = NULL;
int compress_type = 0;
int ret;
- bool nolock = false;
+ bool nolock;
ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
end - start + 1);
@@ -1709,7 +1728,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
return 0;
BUG_ON(!ordered_extent);
- nolock = (root == root->fs_info->tree_root);
+ nolock = is_free_space_inode(root, inode);
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list));
@@ -1855,7 +1874,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
}
read_unlock(&em_tree->lock);
- if (!em || IS_ERR(em)) {
+ if (IS_ERR_OR_NULL(em)) {
kfree(failrec);
return -EIO;
}
@@ -2004,12 +2023,11 @@ good:
return 0;
zeroit:
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
- "private %llu\n", page->mapping->host->i_ino,
+ printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
+ "private %llu\n",
+ (unsigned long long)btrfs_ino(page->mapping->host),
(unsigned long long)start, csum,
(unsigned long long)private);
- }
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
@@ -2244,7 +2262,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
- ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
+ ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
BUG_ON(ret);
}
@@ -2281,7 +2299,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
spin_unlock(&root->orphan_lock);
if (trans && delete_item) {
- ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
+ ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
BUG_ON(ret);
}
@@ -2346,7 +2364,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
break;
/* release the path since we're done with it */
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/*
* this is where we are basically btrfs_lookup, without the
@@ -2543,7 +2561,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
* try to precache a NULL acl entry for files that don't have
* any xattrs or acls
*/
- maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
+ maybe_acls = acls_after_inode_item(leaf, path->slots[0],
+ btrfs_ino(inode));
if (!maybe_acls)
cache_no_acl(inode);
@@ -2647,11 +2666,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int ret;
+ /*
+ * If the root is the tree root, this inode is used to store free
+ * space information. These inodes are updated when the transaction
+ * is committed, so their updates must not be delayed, or a deadlock
+ * would occur.
+ */
+ if (!is_free_space_inode(root, inode)) {
+ ret = btrfs_delayed_update_inode(trans, root, inode);
+ if (!ret)
+ btrfs_set_inode_last_trans(trans, inode);
+ return ret;
+ }
+
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
path->leave_spinning = 1;
- ret = btrfs_lookup_inode(trans, root, path,
- &BTRFS_I(inode)->location, 1);
+ ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
+ 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@@ -2661,7 +2695,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
btrfs_unlock_up_safe(path, 1);
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_inode_item);
+ struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, inode);
btrfs_mark_buffer_dirty(leaf);
@@ -2672,7 +2706,6 @@ failed:
return ret;
}
-
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It remove a link in a directory with a given name, and
@@ -2689,6 +2722,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
+ u64 ino = btrfs_ino(inode);
+ u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path) {
@@ -2697,7 +2732,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
}
path->leave_spinning = 1;
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
@@ -2712,33 +2747,23 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto err;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
- ret = btrfs_del_inode_ref(trans, root, name, name_len,
- inode->i_ino,
- dir->i_ino, &index);
+ ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
+ dir_ino, &index);
if (ret) {
printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
- "inode %lu parent %lu\n", name_len, name,
- inode->i_ino, dir->i_ino);
+ "inode %llu parent %llu\n", name_len, name,
+ (unsigned long long)ino, (unsigned long long)dir_ino);
goto err;
}
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
- index, name, name_len, -1);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto err;
- }
- if (!di) {
- ret = -ENOENT;
+ ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+ if (ret)
goto err;
- }
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
- btrfs_release_path(root, path);
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
- inode, dir->i_ino);
+ inode, dir_ino);
BUG_ON(ret != 0 && ret != -ENOENT);
ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
@@ -2816,12 +2841,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
int check_link = 1;
int err = -ENOSPC;
int ret;
+ u64 ino = btrfs_ino(inode);
+ u64 dir_ino = btrfs_ino(dir);
trans = btrfs_start_transaction(root, 10);
if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
return trans;
- if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return ERR_PTR(-ENOSPC);
/* check if there is someone else holds reference */
@@ -2862,7 +2889,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
} else {
check_link = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(inode)->location, 0);
@@ -2876,11 +2903,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
} else {
check_link = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret == 0 && S_ISREG(inode->i_mode)) {
ret = btrfs_lookup_file_extent(trans, root, path,
- inode->i_ino, (u64)-1, 0);
+ ino, (u64)-1, 0);
if (ret < 0) {
err = ret;
goto out;
@@ -2888,7 +2915,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
BUG_ON(ret == 0);
if (check_path_shared(root, path))
goto out;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
if (!check_link) {
@@ -2896,7 +2923,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
goto out;
}
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
@@ -2909,11 +2936,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
err = 0;
goto out;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ref = btrfs_lookup_inode_ref(trans, root, path,
dentry->d_name.name, dentry->d_name.len,
- inode->i_ino, dir->i_ino, 0);
+ ino, dir_ino, 0);
if (IS_ERR(ref)) {
err = PTR_ERR(ref);
goto out;
@@ -2922,9 +2949,17 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
if (check_path_shared(root, path))
goto out;
index = btrfs_inode_ref_index(path->nodes[0], ref);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
+ /*
+ * This is a commit root search. If we can look up the inode item and
+ * the other related items in the commit root, it means the transaction
+ * that created the dir/file has been committed, and the dir index item
+ * whose insertion we delayed has also made it into the commit root. So
+ * we needn't worry about the delayed insertion of the dir index item
+ * here.
+ */
+ di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
@@ -2999,54 +3034,47 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct btrfs_key key;
u64 index;
int ret;
+ u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
- BUG_ON(!di || IS_ERR(di));
+ BUG_ON(IS_ERR_OR_NULL(di));
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
BUG_ON(ret);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
- dir->i_ino, &index, name, name_len);
+ dir_ino, &index, name, name_len);
if (ret < 0) {
BUG_ON(ret != -ENOENT);
- di = btrfs_search_dir_index_item(root, path, dir->i_ino,
+ di = btrfs_search_dir_index_item(root, path, dir_ino,
name, name_len);
- BUG_ON(!di || IS_ERR(di));
+ BUG_ON(IS_ERR_OR_NULL(di));
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
index = key.offset;
}
+ btrfs_release_path(path);
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
- index, name, name_len, -1);
- BUG_ON(!di || IS_ERR(di));
-
- leaf = path->nodes[0];
- btrfs_dir_item_key_to_cpu(leaf, di, &key);
- WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
BUG_ON(ret);
- btrfs_release_path(root, path);
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
- btrfs_free_path(path);
return 0;
}
@@ -3059,7 +3087,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
unsigned long nr = 0;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
- inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
return -ENOTEMPTY;
trans = __unlink_start_trans(dir, dentry);
@@ -3068,7 +3096,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
btrfs_set_trans_block_group(trans, dir);
- if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
err = btrfs_unlink_subvol(trans, root, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
@@ -3093,178 +3121,6 @@ out:
return err;
}
-#if 0
-/*
- * when truncating bytes in a file, it is possible to avoid reading
- * the leaves that contain only checksum items. This can be the
- * majority of the IO required to delete a large file, but it must
- * be done carefully.
- *
- * The keys in the level just above the leaves are checked to make sure
- * the lowest key in a given leaf is a csum key, and starts at an offset
- * after the new size.
- *
- * Then the key for the next leaf is checked to make sure it also has
- * a checksum item for the same file. If it does, we know our target leaf
- * contains only checksum items, and it can be safely freed without reading
- * it.
- *
- * This is just an optimization targeted at large files. It may do
- * nothing. It will return 0 unless things went badly.
- */
-static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct inode *inode, u64 new_size)
-{
- struct btrfs_key key;
- int ret;
- int nritems;
- struct btrfs_key found_key;
- struct btrfs_key other_key;
- struct btrfs_leaf_ref *ref;
- u64 leaf_gen;
- u64 leaf_start;
-
- path->lowest_level = 1;
- key.objectid = inode->i_ino;
- key.type = BTRFS_CSUM_ITEM_KEY;
- key.offset = new_size;
-again:
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto out;
-
- if (path->nodes[1] == NULL) {
- ret = 0;
- goto out;
- }
- ret = 0;
- btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
- nritems = btrfs_header_nritems(path->nodes[1]);
-
- if (!nritems)
- goto out;
-
- if (path->slots[1] >= nritems)
- goto next_node;
-
- /* did we find a key greater than anything we want to delete? */
- if (found_key.objectid > inode->i_ino ||
- (found_key.objectid == inode->i_ino && found_key.type > key.type))
- goto out;
-
- /* we check the next key in the node to make sure the leave contains
- * only checksum items. This comparison doesn't work if our
- * leaf is the last one in the node
- */
- if (path->slots[1] + 1 >= nritems) {
-next_node:
- /* search forward from the last key in the node, this
- * will bring us into the next node in the tree
- */
- btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
-
- /* unlikely, but we inc below, so check to be safe */
- if (found_key.offset == (u64)-1)
- goto out;
-
- /* search_forward needs a path with locks held, do the
- * search again for the original key. It is possible
- * this will race with a balance and return a path that
- * we could modify, but this drop is just an optimization
- * and is allowed to miss some leaves.
- */
- btrfs_release_path(root, path);
- found_key.offset++;
-
- /* setup a max key for search_forward */
- other_key.offset = (u64)-1;
- other_key.type = key.type;
- other_key.objectid = key.objectid;
-
- path->keep_locks = 1;
- ret = btrfs_search_forward(root, &found_key, &other_key,
- path, 0, 0);
- path->keep_locks = 0;
- if (ret || found_key.objectid != key.objectid ||
- found_key.type != key.type) {
- ret = 0;
- goto out;
- }
-
- key.offset = found_key.offset;
- btrfs_release_path(root, path);
- cond_resched();
- goto again;
- }
-
- /* we know there's one more slot after us in the tree,
- * read that key so we can verify it is also a checksum item
- */
- btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
-
- if (found_key.objectid < inode->i_ino)
- goto next_key;
-
- if (found_key.type != key.type || found_key.offset < new_size)
- goto next_key;
-
- /*
- * if the key for the next leaf isn't a csum key from this objectid,
- * we can't be sure there aren't good items inside this leaf.
- * Bail out
- */
- if (other_key.objectid != inode->i_ino || other_key.type != key.type)
- goto out;
-
- leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
- leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
- /*
- * it is safe to delete this leaf, it contains only
- * csum items from this inode at an offset >= new_size
- */
- ret = btrfs_del_leaf(trans, root, path, leaf_start);
- BUG_ON(ret);
-
- if (root->ref_cows && leaf_gen < trans->transid) {
- ref = btrfs_alloc_leaf_ref(root, 0);
- if (ref) {
- ref->root_gen = root->root_key.offset;
- ref->bytenr = leaf_start;
- ref->owner = 0;
- ref->generation = leaf_gen;
- ref->nritems = 0;
-
- btrfs_sort_leaf_ref(ref);
-
- ret = btrfs_add_leaf_ref(root, ref, 0);
- WARN_ON(ret);
- btrfs_free_leaf_ref(root, ref);
- } else {
- WARN_ON(1);
- }
- }
-next_key:
- btrfs_release_path(root, path);
-
- if (other_key.objectid == inode->i_ino &&
- other_key.type == key.type && other_key.offset > key.offset) {
- key.offset = other_key.offset;
- cond_resched();
- goto again;
- }
- ret = 0;
-out:
- /* fixup any changes we've made to the path */
- path->lowest_level = 0;
- path->keep_locks = 0;
- btrfs_release_path(root, path);
- return ret;
-}
-
-#endif
-
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
@@ -3300,17 +3156,27 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
int encoding;
int ret;
int err = 0;
+ u64 ino = btrfs_ino(inode);
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
if (root->ref_cows || root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
+ /*
+ * This function is also used to drop the items in the log tree before
+ * we relog the inode, so if root != BTRFS_I(inode)->root, it means
+ * we are dropping the logged items. In that case we shouldn't kill
+ * the delayed items.
+ */
+ if (min_type == 0 && root == BTRFS_I(inode)->root)
+ btrfs_kill_delayed_inode_items(inode);
+
path = btrfs_alloc_path();
BUG_ON(!path);
path->reada = -1;
- key.objectid = inode->i_ino;
+ key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
@@ -3338,7 +3204,7 @@ search_again:
found_type = btrfs_key_type(&found_key);
encoding = 0;
- if (found_key.objectid != inode->i_ino)
+ if (found_key.objectid != ino)
break;
if (found_type < min_type)
@@ -3428,7 +3294,6 @@ search_again:
btrfs_file_extent_calc_inline_size(size);
ret = btrfs_truncate_item(trans, root, path,
size, 1);
- BUG_ON(ret);
} else if (root->ref_cows) {
inode_sub_bytes(inode, item_end + 1 -
found_key.offset);
@@ -3457,7 +3322,7 @@ delete:
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
- inode->i_ino, extent_offset);
+ ino, extent_offset);
BUG_ON(ret);
}
@@ -3466,7 +3331,9 @@ delete:
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot) {
- if (root->ref_cows) {
+ if (root->ref_cows &&
+ BTRFS_I(inode)->location.objectid !=
+ BTRFS_FREE_INO_OBJECTID) {
err = -EAGAIN;
goto out;
}
@@ -3477,7 +3344,7 @@ delete:
BUG_ON(ret);
pending_del_nr = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto search_again;
} else {
path->slots[0]--;
@@ -3635,7 +3502,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
block_end - cur_offset, 0);
- BUG_ON(IS_ERR(em) || !em);
+ BUG_ON(IS_ERR_OR_NULL(em));
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3656,7 +3523,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
break;
err = btrfs_insert_file_extent(trans, root,
- inode->i_ino, cur_offset, 0,
+ btrfs_ino(inode), cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
if (err)
@@ -3758,7 +3625,7 @@ void btrfs_evict_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
- root == root->fs_info->tree_root))
+ is_free_space_inode(root, inode)))
goto no_delete;
if (is_bad_inode(inode)) {
@@ -3811,6 +3678,10 @@ void btrfs_evict_inode(struct inode *inode)
BUG_ON(ret);
}
+ if (!(root == root->fs_info->tree_root ||
+ root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
+ btrfs_return_ino(root, btrfs_ino(inode));
+
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
@@ -3836,12 +3707,12 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
path = btrfs_alloc_path();
BUG_ON(!path);
- di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
+ di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0);
if (IS_ERR(di))
ret = PTR_ERR(di);
- if (!di || IS_ERR(di))
+ if (IS_ERR_OR_NULL(di))
goto out_err;
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
@@ -3889,7 +3760,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
- if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
+ if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
goto out;
@@ -3899,7 +3770,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
if (ret)
goto out;
- btrfs_release_path(root->fs_info->tree_root, path);
+ btrfs_release_path(path);
new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
if (IS_ERR(new_root)) {
@@ -3928,6 +3799,7 @@ static void inode_tree_add(struct inode *inode)
struct btrfs_inode *entry;
struct rb_node **p;
struct rb_node *parent;
+ u64 ino = btrfs_ino(inode);
again:
p = &root->inode_tree.rb_node;
parent = NULL;
@@ -3940,9 +3812,9 @@ again:
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
- if (inode->i_ino < entry->vfs_inode.i_ino)
+ if (ino < btrfs_ino(&entry->vfs_inode))
p = &parent->rb_left;
- else if (inode->i_ino > entry->vfs_inode.i_ino)
+ else if (ino > btrfs_ino(&entry->vfs_inode))
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
@@ -4006,9 +3878,9 @@ again:
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
- if (objectid < entry->vfs_inode.i_ino)
+ if (objectid < btrfs_ino(&entry->vfs_inode))
node = node->rb_left;
- else if (objectid > entry->vfs_inode.i_ino)
+ else if (objectid > btrfs_ino(&entry->vfs_inode))
node = node->rb_right;
else
break;
@@ -4016,7 +3888,7 @@ again:
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= entry->vfs_inode.i_ino) {
+ if (objectid <= btrfs_ino(&entry->vfs_inode)) {
node = prev;
break;
}
@@ -4025,7 +3897,7 @@ again:
}
while (node) {
entry = rb_entry(node, struct btrfs_inode, rb_node);
- objectid = entry->vfs_inode.i_ino + 1;
+ objectid = btrfs_ino(&entry->vfs_inode) + 1;
inode = igrab(&entry->vfs_inode);
if (inode) {
spin_unlock(&root->inode_lock);
@@ -4063,7 +3935,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
- return args->ino == inode->i_ino &&
+ return args->ino == btrfs_ino(inode) &&
args->root == BTRFS_I(inode)->root;
}
@@ -4208,7 +4080,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
return d_splice_alias(inode, dentry);
}
-static unsigned char btrfs_filetype_table[] = {
+unsigned char btrfs_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
@@ -4222,6 +4094,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
+ struct list_head ins_list;
+ struct list_head del_list;
int ret;
struct extent_buffer *leaf;
int slot;
@@ -4234,6 +4108,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
char tmp_name[32];
char *name_ptr;
int name_len;
+ int is_curr = 0; /* filp->f_pos points to the current index? */
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
@@ -4241,9 +4116,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
/* special case for "." */
if (filp->f_pos == 0) {
- over = filldir(dirent, ".", 1,
- 1, inode->i_ino,
- DT_DIR);
+ over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
if (over)
return 0;
filp->f_pos = 1;
@@ -4258,11 +4131,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
filp->f_pos = 2;
}
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
path->reada = 2;
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ INIT_LIST_HEAD(&ins_list);
+ INIT_LIST_HEAD(&del_list);
+ btrfs_get_delayed_items(inode, &ins_list, &del_list);
+ }
+
btrfs_set_key_type(&key, key_type);
key.offset = filp->f_pos;
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -4289,8 +4170,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
break;
if (found_key.offset < filp->f_pos)
goto next;
+ if (key_type == BTRFS_DIR_INDEX_KEY &&
+ btrfs_should_delete_dir_index(&del_list,
+ found_key.offset))
+ goto next;
filp->f_pos = found_key.offset;
+ is_curr = 1;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
di_cur = 0;
@@ -4345,6 +4231,15 @@ next:
path->slots[0]++;
}
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ if (is_curr)
+ filp->f_pos++;
+ ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
+ &ins_list);
+ if (ret)
+ goto nopos;
+ }
+
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
/*
@@ -4357,6 +4252,8 @@ next:
nopos:
ret = 0;
err:
+ if (key_type == BTRFS_DIR_INDEX_KEY)
+ btrfs_put_delayed_items(&ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
@@ -4372,7 +4269,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return 0;
smp_mb();
- nolock = (root->fs_info->closing && root == root->fs_info->tree_root);
+ if (root->fs_info->closing && is_free_space_inode(root, inode))
+ nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
if (nolock)
@@ -4396,7 +4294,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-void btrfs_dirty_inode(struct inode *inode)
+void btrfs_dirty_inode(struct inode *inode, int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
@@ -4415,25 +4313,25 @@ void btrfs_dirty_inode(struct inode *inode)
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
- if (printk_ratelimit()) {
- printk(KERN_ERR "btrfs: fail to "
- "dirty inode %lu error %ld\n",
- inode->i_ino, PTR_ERR(trans));
- }
+ printk_ratelimited(KERN_ERR "btrfs: fail to "
+ "dirty inode %llu error %ld\n",
+ (unsigned long long)btrfs_ino(inode),
+ PTR_ERR(trans));
return;
}
btrfs_set_trans_block_group(trans, inode);
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- if (printk_ratelimit()) {
- printk(KERN_ERR "btrfs: fail to "
- "dirty inode %lu error %d\n",
- inode->i_ino, ret);
- }
+ printk_ratelimited(KERN_ERR "btrfs: fail to "
+ "dirty inode %llu error %d\n",
+ (unsigned long long)btrfs_ino(inode),
+ ret);
}
}
btrfs_end_transaction(trans, root);
+ if (BTRFS_I(inode)->delayed_node)
+ btrfs_balance_delayed_items(root);
}
/*
@@ -4449,7 +4347,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
struct extent_buffer *leaf;
int ret;
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
key.offset = (u64)-1;
@@ -4481,7 +4379,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid != inode->i_ino ||
+ if (found_key.objectid != btrfs_ino(inode) ||
btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
@@ -4502,9 +4400,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
int ret = 0;
if (BTRFS_I(dir)->index_cnt == (u64)-1) {
- ret = btrfs_set_inode_index_count(dir);
- if (ret)
- return ret;
+ ret = btrfs_inode_delayed_dir_index_count(dir);
+ if (ret) {
+ ret = btrfs_set_inode_index_count(dir);
+ if (ret)
+ return ret;
+ }
}
*index = BTRFS_I(dir)->index_cnt;
@@ -4540,6 +4441,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
return ERR_PTR(-ENOMEM);
}
+ /*
+ * we have to initialize this early, so we can reclaim the inode
+ * number if we fail afterwards in this function.
+ */
+ inode->i_ino = objectid;
+
if (dir) {
trace_btrfs_inode_request(dir);
@@ -4585,7 +4492,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
goto fail;
inode_init_owner(inode, dir, mode);
- inode->i_ino = objectid;
inode_set_bytes(inode, 0);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -4649,29 +4555,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = BTRFS_I(parent_inode)->root;
+ u64 ino = btrfs_ino(inode);
+ u64 parent_ino = btrfs_ino(parent_inode);
- if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
} else {
- key.objectid = inode->i_ino;
+ key.objectid = ino;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
}
- if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
- parent_inode->i_ino,
- index, name, name_len);
+ parent_ino, index, name, name_len);
} else if (add_backref) {
- ret = btrfs_insert_inode_ref(trans, root,
- name, name_len, inode->i_ino,
- parent_inode->i_ino, index);
+ ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
+ parent_ino, index);
}
if (ret == 0) {
ret = btrfs_insert_dir_item(trans, root, name, name_len,
- parent_inode->i_ino, &key,
+ parent_inode, &key,
btrfs_inode_type(inode), index);
BUG_ON(ret);
@@ -4714,10 +4620,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (!new_valid_dev(rdev))
return -EINVAL;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
-
/*
* 2 for inode item and ref
* 2 for dir items
@@ -4729,8 +4631,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_unlock;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -4777,9 +4683,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
u64 objectid;
u64 index = 0;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
/*
* 2 for inode item and ref
* 2 for dir items
@@ -4791,8 +4694,12 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_unlock;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -4903,10 +4810,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
u64 index = 0;
unsigned long nr = 1;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
-
/*
* 2 items for inode and ref
* 2 items for dir items
@@ -4917,8 +4820,12 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return PTR_ERR(trans);
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_fail;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, S_IFDIR | mode,
&index);
if (IS_ERR(inode)) {
@@ -5041,7 +4948,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
u64 bytenr;
u64 extent_start = 0;
u64 extent_end = 0;
- u64 objectid = inode->i_ino;
+ u64 objectid = btrfs_ino(inode);
u32 found_type;
struct btrfs_path *path = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5069,7 +4976,7 @@ again:
else
goto out;
}
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
@@ -5223,7 +5130,7 @@ again:
kunmap(page);
free_extent_map(em);
em = NULL;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
trans = btrfs_join_transaction(root, 1);
if (IS_ERR(trans))
return ERR_CAST(trans);
@@ -5249,7 +5156,7 @@ not_found_em:
em->block_start = EXTENT_MAP_HOLE;
set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
"[%llu %llu]\n", (unsigned long long)em->start,
@@ -5382,7 +5289,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
u64 hole_start = start;
u64 hole_len = len;
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
@@ -5472,6 +5379,9 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
if (IS_ERR(trans))
return ERR_CAST(trans);
+ if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
+ btrfs_add_inode_defrag(trans, inode);
+
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
alloc_hint = get_extent_allocation_hint(inode, start, len);
@@ -5483,7 +5393,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
}
if (!em) {
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
@@ -5549,7 +5459,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
offset, 0);
if (ret < 0)
goto out;
@@ -5566,7 +5476,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != inode->i_ino ||
+ if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
/* not our file or wrong item type, must cow */
goto out;
@@ -5600,7 +5510,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
* look for other files referencing this extent, if we
* find any we must cow
*/
- if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+ if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
key.offset - backref_offset, disk_bytenr))
goto out;
@@ -5790,9 +5700,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
flush_dcache_page(bvec->bv_page);
if (csum != *private) {
- printk(KERN_ERR "btrfs csum failed ino %lu off"
+ printk(KERN_ERR "btrfs csum failed ino %llu off"
" %llu csum %u private %u\n",
- inode->i_ino, (unsigned long long)start,
+ (unsigned long long)btrfs_ino(inode),
+ (unsigned long long)start,
csum, *private);
err = -EIO;
}
@@ -5939,9 +5850,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
struct btrfs_dio_private *dip = bio->bi_private;
if (err) {
- printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
+ printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
"sector %#Lx len %u err no %d\n",
- dip->inode->i_ino, bio->bi_rw,
+ (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
(unsigned long long)bio->bi_sector, bio->bi_size, err);
dip->errors = 1;
@@ -6782,12 +6693,15 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->ordered_data_close = 0;
ei->orphan_meta_reserved = 0;
ei->dummy_inode = 0;
+ ei->in_defrag = 0;
ei->force_compress = BTRFS_COMPRESS_NONE;
+ ei->delayed_node = NULL;
+
inode = &ei->vfs_inode;
- extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
- extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
- extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
+ extent_map_tree_init(&ei->extent_tree);
+ extent_io_tree_init(&ei->io_tree, &inode->i_data);
+ extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
mutex_init(&ei->log_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->i_orphan);
@@ -6851,8 +6765,8 @@ void btrfs_destroy_inode(struct inode *inode)
spin_lock(&root->orphan_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
- printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
- inode->i_ino);
+ printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
+ (unsigned long long)btrfs_ino(inode));
list_del_init(&BTRFS_I(inode)->i_orphan);
}
spin_unlock(&root->orphan_lock);
@@ -6874,6 +6788,7 @@ void btrfs_destroy_inode(struct inode *inode)
inode_tree_del(inode);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
+ btrfs_remove_delayed_node(inode);
call_rcu(&inode->i_rcu, btrfs_i_callback);
}
@@ -6882,7 +6797,7 @@ int btrfs_drop_inode(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0 &&
- root != root->fs_info->tree_root)
+ !is_free_space_inode(root, inode))
return 1;
else
return generic_drop_inode(inode);
@@ -6991,16 +6906,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
u64 index = 0;
u64 root_objectid;
int ret;
+ u64 old_ino = btrfs_ino(old_inode);
- if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
/* we only allow rename subvolume link between subvolumes */
- if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
- if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
- (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
+ if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
+ (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
return -ENOTEMPTY;
if (S_ISDIR(old_inode->i_mode) && new_inode &&
@@ -7016,7 +6932,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
filemap_flush(old_inode->i_mapping);
/* close the racy window with snapshot create/destroy ioctl */
- if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&root->fs_info->subvol_sem);
/*
* We want to reserve the absolute worst case amount of items. So if
@@ -7041,15 +6957,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (ret)
goto out_fail;
- if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
/* force full log commit if subvolume involved. */
root->fs_info->last_trans_log_full_commit = trans->transid;
} else {
ret = btrfs_insert_inode_ref(trans, dest,
new_dentry->d_name.name,
new_dentry->d_name.len,
- old_inode->i_ino,
- new_dir->i_ino, index);
+ old_ino,
+ btrfs_ino(new_dir), index);
if (ret)
goto out_fail;
/*
@@ -7065,10 +6981,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* make sure the inode gets flushed if it is replacing
* something.
*/
- if (new_inode && new_inode->i_size &&
- old_inode && S_ISREG(old_inode->i_mode)) {
+ if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
btrfs_add_ordered_operation(trans, root, old_inode);
- }
old_dir->i_ctime = old_dir->i_mtime = ctime;
new_dir->i_ctime = new_dir->i_mtime = ctime;
@@ -7077,7 +6991,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
- if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
old_dentry->d_name.name,
@@ -7094,7 +7008,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (new_inode) {
new_inode->i_ctime = CURRENT_TIME;
- if (unlikely(new_inode->i_ino ==
+ if (unlikely(btrfs_ino(new_inode) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
root_objectid = BTRFS_I(new_inode)->location.objectid;
ret = btrfs_unlink_subvol(trans, dest, new_dir,
@@ -7122,7 +7036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dentry->d_name.len, 0, index);
BUG_ON(ret);
- if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
struct dentry *parent = dget_parent(new_dentry);
btrfs_log_new_name(trans, old_inode, old_dir, parent);
dput(parent);
@@ -7131,7 +7045,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
out_fail:
btrfs_end_transaction_throttle(trans, root);
out_notrans:
- if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
return ret;
@@ -7185,58 +7099,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
return 0;
}
-int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
- int sync)
-{
- struct btrfs_inode *binode;
- struct inode *inode = NULL;
-
- spin_lock(&root->fs_info->delalloc_lock);
- while (!list_empty(&root->fs_info->delalloc_inodes)) {
- binode = list_entry(root->fs_info->delalloc_inodes.next,
- struct btrfs_inode, delalloc_inodes);
- inode = igrab(&binode->vfs_inode);
- if (inode) {
- list_move_tail(&binode->delalloc_inodes,
- &root->fs_info->delalloc_inodes);
- break;
- }
-
- list_del_init(&binode->delalloc_inodes);
- cond_resched_lock(&root->fs_info->delalloc_lock);
- }
- spin_unlock(&root->fs_info->delalloc_lock);
-
- if (inode) {
- if (sync) {
- filemap_write_and_wait(inode->i_mapping);
- /*
- * We have to do this because compression doesn't
- * actually set PG_writeback until it submits the pages
- * for IO, which happens in an async thread, so we could
- * race and not actually wait for any writeback pages
- * because they've not been submitted yet. Technically
- * this could still be the case for the ordered stuff
- * since the async thread may not have started to do its
- * work yet. If this becomes the case then we need to
- * figure out a way to make sure that in writepage we
- * wait for any async pages to be submitted before
- * returning so that fdatawait does what its supposed to
- * do.
- */
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
- } else {
- filemap_flush(inode->i_mapping);
- }
- if (delay_iput)
- btrfs_add_delayed_iput(inode);
- else
- iput(inode);
- return 1;
- }
- return 0;
-}
-
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
@@ -7260,9 +7122,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
return -ENAMETOOLONG;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
/*
* 2 items for inode item and ref
* 2 items for dir items
@@ -7274,8 +7133,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_unlock;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
&index);
if (IS_ERR(inode)) {
@@ -7307,7 +7170,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
path = btrfs_alloc_path();
BUG_ON(!path);
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(name_len);
@@ -7315,6 +7178,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
datasize);
if (err) {
drop_inode = 1;
+ btrfs_free_path(path);
goto out_unlock;
}
leaf = path->nodes[0];
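A recurring change in this inode.c diff is the switch from inode->i_ino to btrfs_ino(inode). As a hedged sketch (the real helper lives in fs/btrfs/btrfs_inode.h and may differ in detail), the accessor can be thought of as reading the objectid cached in the btrfs location key and only falling back to the VFS inode number for the low, reserved objectids:

static inline u64 btrfs_ino(struct inode *inode)
{
	/*
	 * Sketch only: take the object id from the cached btrfs key rather
	 * than from the VFS i_ino, which can hold a dummy value for internal
	 * inodes such as the free space cache inode.
	 */
	u64 ino = BTRFS_I(inode)->location.objectid;

	if (ino <= BTRFS_FIRST_FREE_OBJECTID)
		ino = inode->i_ino;
	return ino;
}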
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2616f7ed4799..85e818ce00c5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -50,6 +50,7 @@
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
+#include "inode-map.h"
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -281,8 +282,9 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
- list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
+ dev_list) {
if (!device->bdev)
continue;
q = bdev_get_queue(device->bdev);
@@ -292,7 +294,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
minlen);
}
}
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ rcu_read_unlock();
if (!num_devices)
return -EOPNOTSUPP;
@@ -329,8 +331,7 @@ static noinline int create_subvol(struct btrfs_root *root,
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
- ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
- 0, &objectid);
+ ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
if (ret) {
dput(parent);
return ret;
@@ -422,7 +423,7 @@ static noinline int create_subvol(struct btrfs_root *root,
BUG_ON(ret);
ret = btrfs_insert_dir_item(trans, root,
- name, namelen, dir->i_ino, &key,
+ name, namelen, dir, &key,
BTRFS_FT_DIR, index);
if (ret)
goto fail;
@@ -433,7 +434,7 @@ static noinline int create_subvol(struct btrfs_root *root,
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
- dir->i_ino, index, name, namelen);
+ btrfs_ino(dir), index, name, namelen);
BUG_ON(ret);
@@ -655,6 +656,106 @@ out_unlock:
return error;
}
+/*
+ * When we're defragging a range, we don't want to kick it off again
+ * if it is really just waiting for delalloc to send it down.
+ * If we find a nice big extent or delalloc range for the bytes in the
+ * file you want to defrag, we return 0 to let you know to skip this
+ * part of the file.
+ */
+static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh)
+{
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_map *em = NULL;
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ u64 end;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+ read_unlock(&em_tree->lock);
+
+ if (em) {
+ end = extent_map_end(em);
+ free_extent_map(em);
+ if (end - offset > thresh)
+ return 0;
+ }
+ /* if we already have a nice delalloc here, just stop */
+ thresh /= 2;
+ end = count_range_bits(io_tree, &offset, offset + thresh,
+ thresh, EXTENT_DELALLOC, 1);
+ if (end >= thresh)
+ return 0;
+ return 1;
+}
+
+/*
+ * helper function to walk through a file and find extents
+ * newer than a specific transid, and smaller than thresh.
+ *
+ * This is used by the defragging code to find new and small
+ * extents
+ */
+static int find_new_extents(struct btrfs_root *root,
+ struct inode *inode, u64 newer_than,
+ u64 *off, int thresh)
+{
+ struct btrfs_path *path;
+ struct btrfs_key min_key;
+ struct btrfs_key max_key;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *extent;
+ int type;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ min_key.objectid = inode->i_ino;
+ min_key.type = BTRFS_EXTENT_DATA_KEY;
+ min_key.offset = *off;
+
+ max_key.objectid = inode->i_ino;
+ max_key.type = (u8)-1;
+ max_key.offset = (u64)-1;
+
+ path->keep_locks = 1;
+
+	while (1) {
+ ret = btrfs_search_forward(root, &min_key, &max_key,
+ path, 0, newer_than);
+ if (ret != 0)
+ goto none;
+ if (min_key.objectid != inode->i_ino)
+ goto none;
+ if (min_key.type != BTRFS_EXTENT_DATA_KEY)
+ goto none;
+
+ leaf = path->nodes[0];
+ extent = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ type = btrfs_file_extent_type(leaf, extent);
+ if (type == BTRFS_FILE_EXTENT_REG &&
+ btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
+ check_defrag_in_cache(inode, min_key.offset, thresh)) {
+ *off = min_key.offset;
+ btrfs_free_path(path);
+ return 0;
+ }
+
+ if (min_key.offset == (u64)-1)
+ goto none;
+
+ min_key.offset++;
+ btrfs_release_path(path);
+ }
+none:
+ btrfs_free_path(path);
+ return -ENOENT;
+}
+
static int should_defrag_range(struct inode *inode, u64 start, u64 len,
int thresh, u64 *last_len, u64 *skip,
u64 *defrag_end)
@@ -664,10 +765,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 1;
-
- if (thresh == 0)
- thresh = 256 * 1024;
-
/*
 * make sure that once we start defragging an extent, we keep on
* defragging it
@@ -726,27 +823,176 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
return ret;
}
-static int btrfs_defrag_file(struct file *file,
- struct btrfs_ioctl_defrag_range_args *range)
+/*
+ * it doesn't do much good to defrag one or two pages
+ * at a time. This pulls in a nice chunk of pages
+ * to COW and defrag.
+ *
+ * It also makes sure the delalloc code has enough
+ * dirty data to avoid making new small extents as part
+ * of the defrag
+ *
+ * It's a good idea to start RA on this range
+ * before calling this.
+ */
+static int cluster_pages_for_defrag(struct inode *inode,
+ struct page **pages,
+ unsigned long start_index,
+ int num_pages)
{
- struct inode *inode = fdentry(file)->d_inode;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ unsigned long file_end;
+ u64 isize = i_size_read(inode);
+ u64 page_start;
+ u64 page_end;
+ int ret;
+ int i;
+ int i_done;
struct btrfs_ordered_extent *ordered;
- struct page *page;
+ struct extent_state *cached_state = NULL;
+
+ if (isize == 0)
+ return 0;
+ file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+
+ ret = btrfs_delalloc_reserve_space(inode,
+ num_pages << PAGE_CACHE_SHIFT);
+ if (ret)
+ return ret;
+again:
+ ret = 0;
+ i_done = 0;
+
+ /* step one, lock all the pages */
+ for (i = 0; i < num_pages; i++) {
+ struct page *page;
+ page = grab_cache_page(inode->i_mapping,
+ start_index + i);
+ if (!page)
+ break;
+
+ if (!PageUptodate(page)) {
+ btrfs_readpage(NULL, page);
+ lock_page(page);
+ if (!PageUptodate(page)) {
+ unlock_page(page);
+ page_cache_release(page);
+ ret = -EIO;
+ break;
+ }
+ }
+ isize = i_size_read(inode);
+ file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+ if (!isize || page->index > file_end ||
+ page->mapping != inode->i_mapping) {
+ /* whoops, we blew past eof, skip this page */
+ unlock_page(page);
+ page_cache_release(page);
+ break;
+ }
+ pages[i] = page;
+ i_done++;
+ }
+ if (!i_done || ret)
+ goto out;
+
+ if (!(inode->i_sb->s_flags & MS_ACTIVE))
+ goto out;
+
+ /*
+ * so now we have a nice long stream of locked
+	 * and up-to-date pages, let's wait on them
+ */
+ for (i = 0; i < i_done; i++)
+ wait_on_page_writeback(pages[i]);
+
+ page_start = page_offset(pages[0]);
+ page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
+
+ lock_extent_bits(&BTRFS_I(inode)->io_tree,
+ page_start, page_end - 1, 0, &cached_state,
+ GFP_NOFS);
+ ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1);
+ if (ordered &&
+ ordered->file_offset + ordered->len > page_start &&
+ ordered->file_offset < page_end) {
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ page_start, page_end - 1,
+ &cached_state, GFP_NOFS);
+ for (i = 0; i < i_done; i++) {
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
+ }
+ btrfs_wait_ordered_range(inode, page_start,
+ page_end - page_start);
+ goto again;
+ }
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
+ page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
+ GFP_NOFS);
+
+ if (i_done != num_pages) {
+ atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+ btrfs_delalloc_release_space(inode,
+ (num_pages - i_done) << PAGE_CACHE_SHIFT);
+ }
+
+
+ btrfs_set_extent_delalloc(inode, page_start, page_end - 1,
+ &cached_state);
+
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ page_start, page_end - 1, &cached_state,
+ GFP_NOFS);
+
+ for (i = 0; i < i_done; i++) {
+ clear_page_dirty_for_io(pages[i]);
+ ClearPageChecked(pages[i]);
+ set_page_extent_mapped(pages[i]);
+ set_page_dirty(pages[i]);
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
+ }
+ return i_done;
+out:
+ for (i = 0; i < i_done; i++) {
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
+ }
+ btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT);
+ return ret;
+
+}
+
+int btrfs_defrag_file(struct inode *inode, struct file *file,
+ struct btrfs_ioctl_defrag_range_args *range,
+ u64 newer_than, unsigned long max_to_defrag)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_super_block *disk_super;
+ struct file_ra_state *ra = NULL;
unsigned long last_index;
- unsigned long ra_pages = root->fs_info->bdi.ra_pages;
- unsigned long total_read = 0;
u64 features;
- u64 page_start;
- u64 page_end;
u64 last_len = 0;
u64 skip = 0;
u64 defrag_end = 0;
+ u64 newer_off = range->start;
+ int newer_left = 0;
unsigned long i;
int ret;
+ int defrag_count = 0;
int compress_type = BTRFS_COMPRESS_ZLIB;
+ int extent_thresh = range->extent_thresh;
+ int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+ u64 new_align = ~((u64)128 * 1024 - 1);
+ struct page **pages = NULL;
+
+ if (extent_thresh == 0)
+ extent_thresh = 256 * 1024;
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
if (range->compress_type > BTRFS_COMPRESS_TYPES)
@@ -758,6 +1004,27 @@ static int btrfs_defrag_file(struct file *file,
if (inode->i_size == 0)
return 0;
+ /*
+ * if we were not given a file, allocate a readahead
+ * context
+ */
+ if (!file) {
+ ra = kzalloc(sizeof(*ra), GFP_NOFS);
+ if (!ra)
+ return -ENOMEM;
+ file_ra_state_init(ra, inode->i_mapping);
+ } else {
+ ra = &file->f_ra;
+ }
+
+ pages = kmalloc(sizeof(struct page *) * newer_cluster,
+ GFP_NOFS);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto out_ra;
+ }
+
+ /* find the last page to defrag */
if (range->start + range->len > range->start) {
last_index = min_t(u64, inode->i_size - 1,
range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
@@ -765,11 +1032,37 @@ static int btrfs_defrag_file(struct file *file,
last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
}
- i = range->start >> PAGE_CACHE_SHIFT;
- while (i <= last_index) {
- if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+ if (newer_than) {
+ ret = find_new_extents(root, inode, newer_than,
+ &newer_off, 64 * 1024);
+ if (!ret) {
+ range->start = newer_off;
+ /*
+ * we always align our defrag to help keep
+ * the extents in the file evenly spaced
+ */
+ i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+ newer_left = newer_cluster;
+ } else
+ goto out_ra;
+ } else {
+ i = range->start >> PAGE_CACHE_SHIFT;
+ }
+ if (!max_to_defrag)
+ max_to_defrag = last_index - 1;
+
+ while (i <= last_index && defrag_count < max_to_defrag) {
+ /*
+ * make sure we stop running if someone unmounts
+ * the FS
+ */
+ if (!(inode->i_sb->s_flags & MS_ACTIVE))
+ break;
+
+ if (!newer_than &&
+ !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE,
- range->extent_thresh,
+ extent_thresh,
&last_len, &skip,
&defrag_end)) {
unsigned long next;
@@ -781,92 +1074,39 @@ static int btrfs_defrag_file(struct file *file,
i = max(i + 1, next);
continue;
}
-
- if (total_read % ra_pages == 0) {
- btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
- min(last_index, i + ra_pages - 1));
- }
- total_read++;
- mutex_lock(&inode->i_mutex);
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
BTRFS_I(inode)->force_compress = compress_type;
- ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
- if (ret)
- goto err_unlock;
-again:
- if (inode->i_size == 0 ||
- i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
- ret = 0;
- goto err_reservations;
- }
+ btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster);
- page = grab_cache_page(inode->i_mapping, i);
- if (!page) {
- ret = -ENOMEM;
- goto err_reservations;
- }
-
- if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- page_cache_release(page);
- ret = -EIO;
- goto err_reservations;
- }
- }
-
- if (page->mapping != inode->i_mapping) {
- unlock_page(page);
- page_cache_release(page);
- goto again;
- }
+ ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster);
+ if (ret < 0)
+ goto out_ra;
- wait_on_page_writeback(page);
+ defrag_count += ret;
+ balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
+ i += ret;
- if (PageDirty(page)) {
- btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
- goto loop_unlock;
- }
-
- page_start = (u64)page->index << PAGE_CACHE_SHIFT;
- page_end = page_start + PAGE_CACHE_SIZE - 1;
- lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+ if (newer_than) {
+ if (newer_off == (u64)-1)
+ break;
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
- if (ordered) {
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
- unlock_page(page);
- page_cache_release(page);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- goto again;
+ newer_off = max(newer_off + 1,
+ (u64)i << PAGE_CACHE_SHIFT);
+
+ ret = find_new_extents(root, inode,
+ newer_than, &newer_off,
+ 64 * 1024);
+ if (!ret) {
+ range->start = newer_off;
+ i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+ newer_left = newer_cluster;
+ } else {
+ break;
+ }
+ } else {
+ i++;
}
- set_page_extent_mapped(page);
-
- /*
- * this makes sure page_mkwrite is called on the
- * page if it is dirtied again later
- */
- clear_page_dirty_for_io(page);
- clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
- page_end, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, GFP_NOFS);
-
- btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
- ClearPageChecked(page);
- set_page_dirty(page);
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
-
-loop_unlock:
- unlock_page(page);
- page_cache_release(page);
- mutex_unlock(&inode->i_mutex);
-
- balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
- i++;
}
if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
@@ -898,12 +1138,14 @@ loop_unlock:
btrfs_set_super_incompat_flags(disk_super, features);
}
- return 0;
+ if (!file)
+ kfree(ra);
+ return defrag_count;
-err_reservations:
- btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
-err_unlock:
- mutex_unlock(&inode->i_mutex);
+out_ra:
+ if (!file)
+ kfree(ra);
+ kfree(pages);
return ret;
}
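With this rework btrfs_defrag_file() takes the inode directly and tolerates a NULL struct file, allocating its own readahead state in that case. A minimal sketch of an in-kernel caller using that convention; defrag_whole_inode() and its error handling are illustrative assumptions, not part of this patch:

/* Sketch only: assumes the usual btrfs headers (ctree.h, ioctl args) are in scope. */
static int defrag_whole_inode(struct inode *inode, u64 newer_than)
{
	struct btrfs_ioctl_defrag_range_args range;
	int ret;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;	/* defrag the whole file */

	/* file == NULL: btrfs_defrag_file() sets up its own file_ra_state */
	ret = btrfs_defrag_file(inode, NULL, &range, newer_than, 0);
	return ret < 0 ? ret : 0;	/* a positive return is the defragged page count */
}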
@@ -1129,7 +1371,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
int ret = 0;
u64 flags = 0;
- if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
down_read(&root->fs_info->subvol_sem);
@@ -1156,7 +1398,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
- if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1279,7 +1521,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
int nritems;
int i;
int slot;
- int found = 0;
int ret = 0;
leaf = path->nodes[0];
@@ -1326,7 +1567,7 @@ static noinline int copy_to_sk(struct btrfs_root *root,
item_off, item_len);
*sk_offset += item_len;
}
- found++;
+ (*num_found)++;
if (*num_found >= sk->nr_items)
break;
@@ -1345,7 +1586,6 @@ advance_key:
} else
ret = 1;
overflow:
- *num_found += found;
return ret;
}
@@ -1402,7 +1642,7 @@ static noinline int search_ioctl(struct inode *inode,
}
ret = copy_to_sk(root, path, &key, sk, args->buf,
&sk_offset, &num_found);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret || num_found >= sk->nr_items)
break;
@@ -1509,7 +1749,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
break;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
@@ -1639,7 +1879,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto out_dput;
}
- if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
err = -EINVAL;
goto out_dput;
}
@@ -1757,7 +1997,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
/* the rest are all set to zero by kzalloc */
range->len = (u64)-1;
}
- ret = btrfs_defrag_file(file, range);
+ ret = btrfs_defrag_file(fdentry(file)->d_inode, file,
+ range, 0, 0);
+ if (ret > 0)
+ ret = 0;
kfree(range);
break;
default:
@@ -1809,6 +2052,75 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
return ret;
}
+static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_fs_info_args fi_args;
+ struct btrfs_device *device;
+ struct btrfs_device *next;
+ struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ fi_args.num_devices = fs_devices->num_devices;
+ fi_args.max_id = 0;
+ memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
+ if (device->devid > fi_args.max_id)
+ fi_args.max_id = device->devid;
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_dev_info_args *di_args;
+ struct btrfs_device *dev;
+ struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ int ret = 0;
+ char *s_uuid = NULL;
+ char empty_uuid[BTRFS_UUID_SIZE] = {0};
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ di_args = memdup_user(arg, sizeof(*di_args));
+ if (IS_ERR(di_args))
+ return PTR_ERR(di_args);
+
+ if (memcmp(empty_uuid, di_args->uuid, BTRFS_UUID_SIZE) != 0)
+ s_uuid = di_args->uuid;
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ dev = btrfs_find_device(root, di_args->devid, s_uuid, NULL);
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ di_args->devid = dev->devid;
+ di_args->bytes_used = dev->bytes_used;
+ di_args->total_bytes = dev->total_bytes;
+ memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
+ strncpy(di_args->path, dev->name, sizeof(di_args->path));
+
+out:
+ if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
+ ret = -EFAULT;
+
+ kfree(di_args);
+ return ret;
+}
+
static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
u64 off, u64 olen, u64 destoff)
{
@@ -1925,7 +2237,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
}
/* clone data */
- key.objectid = src->i_ino;
+ key.objectid = btrfs_ino(src);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
@@ -1952,7 +2264,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_item_key_to_cpu(leaf, &key, slot);
if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
- key.objectid != src->i_ino)
+ key.objectid != btrfs_ino(src))
break;
if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
@@ -1988,14 +2300,14 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
datal = btrfs_file_extent_ram_bytes(leaf,
extent);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (key.offset + datal <= off ||
key.offset >= off+len)
goto next;
memcpy(&new_key, &key, sizeof(new_key));
- new_key.objectid = inode->i_ino;
+ new_key.objectid = btrfs_ino(inode);
if (off <= key.offset)
new_key.offset = key.offset + destoff - off;
else
@@ -2049,7 +2361,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
ret = btrfs_inc_extent_ref(trans, root,
disko, diskl, 0,
root->root_key.objectid,
- inode->i_ino,
+ btrfs_ino(inode),
new_key.offset - datao);
BUG_ON(ret);
}
@@ -2098,7 +2410,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
}
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -2119,12 +2431,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_end_transaction(trans, root);
}
next:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.offset++;
}
ret = 0;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
out_unlock:
mutex_unlock(&src->i_mutex);
@@ -2471,6 +2783,58 @@ static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp)
return btrfs_wait_for_commit(root, transid);
}
+static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg)
+{
+ int ret;
+ struct btrfs_ioctl_scrub_args *sa;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end,
+ &sa->progress, sa->flags & BTRFS_SCRUB_READONLY);
+
+ if (copy_to_user(arg, sa, sizeof(*sa)))
+ ret = -EFAULT;
+
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return btrfs_scrub_cancel(root);
+}
+
+static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
+ void __user *arg)
+{
+ struct btrfs_ioctl_scrub_args *sa;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
+
+ if (copy_to_user(arg, sa, sizeof(*sa)))
+ ret = -EFAULT;
+
+ kfree(sa);
+ return ret;
+}
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -2510,6 +2874,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_add_dev(root, argp);
case BTRFS_IOC_RM_DEV:
return btrfs_ioctl_rm_dev(root, argp);
+ case BTRFS_IOC_FS_INFO:
+ return btrfs_ioctl_fs_info(root, argp);
+ case BTRFS_IOC_DEV_INFO:
+ return btrfs_ioctl_dev_info(root, argp);
case BTRFS_IOC_BALANCE:
return btrfs_balance(root->fs_info->dev_root);
case BTRFS_IOC_CLONE:
@@ -2533,6 +2901,12 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_start_sync(file, argp);
case BTRFS_IOC_WAIT_SYNC:
return btrfs_ioctl_wait_sync(file, argp);
+ case BTRFS_IOC_SCRUB:
+ return btrfs_ioctl_scrub(root, argp);
+ case BTRFS_IOC_SCRUB_CANCEL:
+ return btrfs_ioctl_scrub_cancel(root, argp);
+ case BTRFS_IOC_SCRUB_PROGRESS:
+ return btrfs_ioctl_scrub_progress(root, argp);
}
return -ENOTTY;
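The new scrub ioctls operate on btrfs_ioctl_scrub_args (defined in ioctl.h below). A hedged userspace sketch of starting a read-only scrub of one device; it assumes fd is an open descriptor on the mounted filesystem, that the ioctl definitions from this patch are visible to userspace, and that the caller has CAP_SYS_ADMIN:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"	/* btrfs ioctl definitions from this patch */

static int scrub_one_device(int fd, __u64 devid)
{
	struct btrfs_ioctl_scrub_args sa;

	memset(&sa, 0, sizeof(sa));
	sa.devid = devid;
	sa.start = 0;
	sa.end   = (__u64)-1;		/* scrub the whole device */
	sa.flags = BTRFS_SCRUB_READONLY;

	/* BTRFS_IOC_SCRUB returns when the scrub finishes; progress is filled in */
	if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) < 0)
		return -1;

	printf("read errors: %llu, csum errors: %llu\n",
	       (unsigned long long)sa.progress.read_errors,
	       (unsigned long long)sa.progress.csum_errors);
	return 0;
}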
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 8fb382167b13..ad1ea789fcb4 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -32,6 +32,8 @@ struct btrfs_ioctl_vol_args {
#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_FSID_SIZE 16
+#define BTRFS_UUID_SIZE 16
#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
@@ -42,6 +44,71 @@ struct btrfs_ioctl_vol_args_v2 {
char name[BTRFS_SUBVOL_NAME_MAX + 1];
};
+/*
+ * structure to report errors and progress to userspace, either as a
+ * result of a finished scrub, a canceled scrub or a progress inquiry
+ */
+struct btrfs_scrub_progress {
+ __u64 data_extents_scrubbed; /* # of data extents scrubbed */
+ __u64 tree_extents_scrubbed; /* # of tree extents scrubbed */
+ __u64 data_bytes_scrubbed; /* # of data bytes scrubbed */
+ __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */
+ __u64 read_errors; /* # of read errors encountered (EIO) */
+ __u64 csum_errors; /* # of failed csum checks */
+	__u64 verify_errors;		/* # of occurrences where the metadata
+ * of a tree block did not match the
+ * expected values, like generation or
+ * logical */
+	__u64 no_csum;			/* # of 4k data blocks for which no csum
+ * is present, probably the result of
+ * data written with nodatasum */
+	__u64 csum_discards;		/* # of csums for which no data was found
+ * in the extent tree. */
+ __u64 super_errors; /* # of bad super blocks encountered */
+ __u64 malloc_errors; /* # of internal kmalloc errors. These
+ * will likely cause an incomplete
+ * scrub */
+ __u64 uncorrectable_errors; /* # of errors where either no intact
+ * copy was found or the writeback
+ * failed */
+ __u64 corrected_errors; /* # of errors corrected */
+ __u64 last_physical; /* last physical address scrubbed. In
+ * case a scrub was aborted, this can
+ * be used to restart the scrub */
+	__u64 unverified_errors;	/* # of occurrences where a read for a
+ * full (64k) bio failed, but the re-
+ * check succeeded for each 4k piece.
+ * Intermittent error. */
+};
+
+#define BTRFS_SCRUB_READONLY 1
+struct btrfs_ioctl_scrub_args {
+ __u64 devid; /* in */
+ __u64 start; /* in */
+ __u64 end; /* in */
+ __u64 flags; /* in */
+ struct btrfs_scrub_progress progress; /* out */
+ /* pad to 1k */
+ __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8];
+};
+
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+struct btrfs_ioctl_dev_info_args {
+ __u64 devid; /* in/out */
+ __u8 uuid[BTRFS_UUID_SIZE]; /* in/out */
+ __u64 bytes_used; /* out */
+ __u64 total_bytes; /* out */
+ __u64 unused[379]; /* pad to 4k */
+ __u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */
+};
+
+struct btrfs_ioctl_fs_info_args {
+ __u64 max_id; /* out */
+ __u64 num_devices; /* out */
+ __u8 fsid[BTRFS_FSID_SIZE]; /* out */
+ __u64 reserved[124]; /* pad to 1k */
+};
+
#define BTRFS_INO_LOOKUP_PATH_MAX 4080
struct btrfs_ioctl_ino_lookup_args {
__u64 treeid;
@@ -114,37 +181,6 @@ struct btrfs_ioctl_clone_range_args {
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
-struct btrfs_ioctl_defrag_range_args {
- /* start of the defrag operation */
- __u64 start;
-
- /* number of bytes to defrag, use (u64)-1 to say all */
- __u64 len;
-
- /*
- * flags for the operation, which can include turning
- * on compression for this one defrag
- */
- __u64 flags;
-
- /*
- * any extent bigger than this will be considered
- * already defragged. Use 0 to take the kernel default
- * Use 1 to say every single extent must be rewritten
- */
- __u32 extent_thresh;
-
- /*
- * which compression method to use if turning on compression
- * for this defrag operation. If unspecified, zlib will
- * be used
- */
- __u32 compress_type;
-
- /* spare for later */
- __u32 unused[4];
-};
-
struct btrfs_ioctl_space_info {
__u64 flags;
__u64 total_bytes;
@@ -203,4 +239,13 @@ struct btrfs_ioctl_space_args {
struct btrfs_ioctl_vol_args_v2)
#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
+#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
+ struct btrfs_ioctl_scrub_args)
+#define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28)
+#define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \
+ struct btrfs_ioctl_scrub_args)
+#define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \
+ struct btrfs_ioctl_dev_info_args)
+#define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
+ struct btrfs_ioctl_fs_info_args)
#endif
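FS_INFO and DEV_INFO are meant to be used as a pair: FS_INFO reports num_devices and the highest device id, and DEV_INFO is then queried per devid (a zeroed uuid means "look up by devid", matching the empty_uuid check in the handler). A hedged userspace sketch; iterating over possibly sparse device ids is an assumption about typical usage, not something this patch mandates:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"	/* btrfs ioctl definitions from this patch */

static int list_devices(int fd)
{
	struct btrfs_ioctl_fs_info_args fi;
	struct btrfs_ioctl_dev_info_args di;
	__u64 devid;

	if (ioctl(fd, BTRFS_IOC_FS_INFO, &fi) < 0)
		return -1;

	for (devid = 0; devid <= fi.max_id; devid++) {
		memset(&di, 0, sizeof(di));
		di.devid = devid;	/* uuid left zeroed: match by devid only */
		if (ioctl(fd, BTRFS_IOC_DEV_INFO, &di) < 0)
			continue;	/* unused devid slots return -ENODEV */
		printf("devid %llu: %s, %llu of %llu bytes used\n",
		       (unsigned long long)di.devid, (char *)di.path,
		       (unsigned long long)di.bytes_used,
		       (unsigned long long)di.total_bytes);
	}
	return 0;
}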
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 6151f2ea38bb..66fa43dc3f0f 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -185,31 +185,6 @@ sleep:
return 0;
}
-/*
- * Very quick trylock, this does not spin or schedule. It returns
- * 1 with the spinlock held if it was able to take the lock, or it
- * returns zero if it was unable to take the lock.
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
- */
-int btrfs_try_tree_lock(struct extent_buffer *eb)
-{
- if (spin_trylock(&eb->lock)) {
- if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
- /*
- * we've got the spinlock, but the real owner is
- * blocking. Drop the spinlock and return failure
- */
- spin_unlock(&eb->lock);
- return 0;
- }
- return 1;
- }
- /* someone else has the spinlock giveup */
- return 0;
-}
-
int btrfs_tree_unlock(struct extent_buffer *eb)
{
/*
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 6c4ce457168c..5c33a560a2f1 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -21,8 +21,6 @@
int btrfs_tree_lock(struct extent_buffer *eb);
int btrfs_tree_unlock(struct extent_buffer *eb);
-
-int btrfs_try_tree_lock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb);
void btrfs_set_lock_blocking(struct extent_buffer *eb);
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index a97314cf6bd6..82d569cb6267 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -23,56 +23,6 @@
#include "ref-cache.h"
#include "transaction.h"
-/*
- * leaf refs are used to cache the information about which extents
- * a given leaf has references on. This allows us to process that leaf
- * in btrfs_drop_snapshot without needing to read it back from disk.
- */
-
-/*
- * kmalloc a leaf reference struct and update the counters for the
- * total ref cache size
- */
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
- int nr_extents)
-{
- struct btrfs_leaf_ref *ref;
- size_t size = btrfs_leaf_ref_size(nr_extents);
-
- ref = kmalloc(size, GFP_NOFS);
- if (ref) {
- spin_lock(&root->fs_info->ref_cache_lock);
- root->fs_info->total_ref_cache_size += size;
- spin_unlock(&root->fs_info->ref_cache_lock);
-
- memset(ref, 0, sizeof(*ref));
- atomic_set(&ref->usage, 1);
- INIT_LIST_HEAD(&ref->list);
- }
- return ref;
-}
-
-/*
- * free a leaf reference struct and update the counters for the
- * total ref cache size
- */
-void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
-{
- if (!ref)
- return;
- WARN_ON(atomic_read(&ref->usage) == 0);
- if (atomic_dec_and_test(&ref->usage)) {
- size_t size = btrfs_leaf_ref_size(ref->nritems);
-
- BUG_ON(ref->in_tree);
- kfree(ref);
-
- spin_lock(&root->fs_info->ref_cache_lock);
- root->fs_info->total_ref_cache_size -= size;
- spin_unlock(&root->fs_info->ref_cache_lock);
- }
-}
-
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
struct rb_node *node)
{
@@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
}
return NULL;
}
-
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
- int shared)
-{
- struct btrfs_leaf_ref *ref = NULL;
- struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-
- if (shared)
- tree = &root->fs_info->shared_ref_tree;
- if (!tree)
- return 0;
-
- spin_lock(&tree->lock);
- while (!list_empty(&tree->list)) {
- ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
- BUG_ON(ref->tree != tree);
- if (ref->root_gen > max_root_gen)
- break;
- if (!xchg(&ref->in_tree, 0)) {
- cond_resched_lock(&tree->lock);
- continue;
- }
-
- rb_erase(&ref->rb_node, &tree->root);
- list_del_init(&ref->list);
-
- spin_unlock(&tree->lock);
- btrfs_free_leaf_ref(root, ref);
- cond_resched();
- spin_lock(&tree->lock);
- }
- spin_unlock(&tree->lock);
- return 0;
-}
-
-/*
- * find the leaf ref for a given extent. This returns the ref struct with
- * a usage reference incremented
- */
-struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
- u64 bytenr)
-{
- struct rb_node *rb;
- struct btrfs_leaf_ref *ref = NULL;
- struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-again:
- if (tree) {
- spin_lock(&tree->lock);
- rb = tree_search(&tree->root, bytenr);
- if (rb)
- ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
- if (ref)
- atomic_inc(&ref->usage);
- spin_unlock(&tree->lock);
- if (ref)
- return ref;
- }
- if (tree != &root->fs_info->shared_ref_tree) {
- tree = &root->fs_info->shared_ref_tree;
- goto again;
- }
- return NULL;
-}
-
-/*
- * add a fully filled in leaf ref struct
- * remove all the refs older than a given root generation
- */
-int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
- int shared)
-{
- int ret = 0;
- struct rb_node *rb;
- struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-
- if (shared)
- tree = &root->fs_info->shared_ref_tree;
-
- spin_lock(&tree->lock);
- rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
- if (rb) {
- ret = -EEXIST;
- } else {
- atomic_inc(&ref->usage);
- ref->tree = tree;
- ref->in_tree = 1;
- list_add_tail(&ref->list, &tree->list);
- }
- spin_unlock(&tree->lock);
- return ret;
-}
-
-/*
- * remove a single leaf ref from the tree. This drops the ref held by the tree
- * only
- */
-int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
-{
- struct btrfs_leaf_ref_tree *tree;
-
- if (!xchg(&ref->in_tree, 0))
- return 0;
-
- tree = ref->tree;
- spin_lock(&tree->lock);
-
- rb_erase(&ref->rb_node, &tree->root);
- list_del_init(&ref->list);
-
- spin_unlock(&tree->lock);
-
- btrfs_free_leaf_ref(root, ref);
- return 0;
-}
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
index e2a55cb2072b..24f7001f6387 100644
--- a/fs/btrfs/ref-cache.h
+++ b/fs/btrfs/ref-cache.h
@@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents)
return sizeof(struct btrfs_leaf_ref) +
sizeof(struct btrfs_extent_info) * nr_extents;
}
-
-static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
-{
- tree->root = RB_ROOT;
- INIT_LIST_HEAD(&tree->list);
- spin_lock_init(&tree->lock);
-}
-
-static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
-{
- return RB_EMPTY_ROOT(&tree->root);
-}
-
-void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
- int nr_extents);
-void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
-struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
- u64 bytenr);
-int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
- int shared);
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
- int shared);
-int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
#endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index f340f7c99d09..ca38eca70af0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -30,6 +30,7 @@
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
+#include "inode-map.h"
/*
* backref_node, mapping_node and tree_block start with this
@@ -507,6 +508,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
return 1;
}
+
static int should_ignore_root(struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
@@ -529,7 +531,6 @@ static int should_ignore_root(struct btrfs_root *root)
*/
return 1;
}
-
/*
* find reloc tree by address of tree root
*/
@@ -961,7 +962,7 @@ again:
lower = upper;
upper = NULL;
}
- btrfs_release_path(root, path2);
+ btrfs_release_path(path2);
next:
if (ptr < end) {
ptr += btrfs_extent_inline_ref_size(key.type);
@@ -974,7 +975,7 @@ next:
if (ptr >= end)
path1->slots[0]++;
}
- btrfs_release_path(rc->extent_root, path1);
+ btrfs_release_path(path1);
cur->checked = 1;
WARN_ON(exist);
@@ -1409,9 +1410,9 @@ again:
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
- if (objectid < entry->vfs_inode.i_ino)
+ if (objectid < btrfs_ino(&entry->vfs_inode))
node = node->rb_left;
- else if (objectid > entry->vfs_inode.i_ino)
+ else if (objectid > btrfs_ino(&entry->vfs_inode))
node = node->rb_right;
else
break;
@@ -1419,7 +1420,7 @@ again:
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= entry->vfs_inode.i_ino) {
+ if (objectid <= btrfs_ino(&entry->vfs_inode)) {
node = prev;
break;
}
@@ -1434,7 +1435,7 @@ again:
return inode;
}
- objectid = entry->vfs_inode.i_ino + 1;
+ objectid = btrfs_ino(&entry->vfs_inode) + 1;
if (cond_resched_lock(&root->inode_lock))
goto again;
@@ -1470,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
return -ENOMEM;
bytenr -= BTRFS_I(reloc_inode)->index_cnt;
- ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
+ ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
bytenr, 0);
if (ret < 0)
goto out;
@@ -1558,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
if (first) {
inode = find_next_inode(root, key.objectid);
first = 0;
- } else if (inode && inode->i_ino < key.objectid) {
+ } else if (inode && btrfs_ino(inode) < key.objectid) {
btrfs_add_delayed_iput(inode);
inode = find_next_inode(root, key.objectid);
}
- if (inode && inode->i_ino == key.objectid) {
+ if (inode && btrfs_ino(inode) == key.objectid) {
end = key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
WARN_ON(!IS_ALIGNED(key.offset,
@@ -1749,7 +1750,7 @@ again:
btrfs_node_key_to_cpu(path->nodes[level], &key,
path->slots[level]);
- btrfs_release_path(src, path);
+ btrfs_release_path(path);
path->lowest_level = level;
ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
@@ -1893,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
struct inode *inode = NULL;
u64 objectid;
u64 start, end;
+ u64 ino;
objectid = min_key->objectid;
while (1) {
@@ -1905,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root,
inode = find_next_inode(root, objectid);
if (!inode)
break;
+ ino = btrfs_ino(inode);
- if (inode->i_ino > max_key->objectid) {
+ if (ino > max_key->objectid) {
iput(inode);
break;
}
- objectid = inode->i_ino + 1;
+ objectid = ino + 1;
if (!S_ISREG(inode->i_mode))
continue;
- if (unlikely(min_key->objectid == inode->i_ino)) {
+ if (unlikely(min_key->objectid == ino)) {
if (min_key->type > BTRFS_EXTENT_DATA_KEY)
continue;
if (min_key->type < BTRFS_EXTENT_DATA_KEY)
@@ -1928,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
start = 0;
}
- if (unlikely(max_key->objectid == inode->i_ino)) {
+ if (unlikely(max_key->objectid == ino)) {
if (max_key->type < BTRFS_EXTENT_DATA_KEY)
continue;
if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
@@ -2496,7 +2499,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
path->locks[upper->level] = 0;
slot = path->slots[upper->level];
- btrfs_release_path(NULL, path);
+ btrfs_release_path(path);
} else {
ret = btrfs_bin_search(upper->eb, key, upper->level,
&slot);
@@ -2737,7 +2740,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
} else {
path->lowest_level = node->level;
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret > 0)
ret = 0;
}
@@ -2870,7 +2873,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
struct extent_map *em;
int ret = 0;
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em)
return -ENOMEM;
@@ -3119,7 +3122,7 @@ static int add_tree_block(struct reloc_control *rc,
#endif
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
BUG_ON(level == -1);
@@ -3220,7 +3223,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
- if (!inode || IS_ERR(inode) || is_bad_inode(inode)) {
+ if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
if (inode && !IS_ERR(inode))
iput(inode);
return -ENOENT;
@@ -3505,7 +3508,7 @@ int add_data_references(struct reloc_control *rc,
}
path->slots[0]++;
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
if (err)
free_block_list(blocks);
return err;
@@ -3568,7 +3571,7 @@ next:
EXTENT_DIRTY);
if (ret == 0 && start <= key.objectid) {
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
rc->search_start = end + 1;
} else {
rc->search_start = key.objectid + key.offset;
@@ -3576,7 +3579,7 @@ next:
return 0;
}
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
return ret;
}
@@ -3713,7 +3716,7 @@ restart:
flags = BTRFS_EXTENT_FLAG_DATA;
if (path_change) {
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
path->search_commit_root = 1;
path->skip_locking = 1;
@@ -3736,7 +3739,7 @@ restart:
(flags & BTRFS_EXTENT_FLAG_DATA)) {
ret = add_data_references(rc, &key, path, &blocks);
} else {
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
ret = 0;
}
if (ret < 0) {
@@ -3799,7 +3802,7 @@ restart:
}
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
GFP_NOFS);
@@ -3867,7 +3870,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
out:
btrfs_free_path(path);
return ret;
@@ -3897,7 +3900,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
if (IS_ERR(trans))
return ERR_CAST(trans);
- err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out;
@@ -3935,7 +3938,7 @@ static struct reloc_control *alloc_reloc_control(void)
INIT_LIST_HEAD(&rc->reloc_roots);
backref_cache_init(&rc->backref_cache);
mapping_tree_init(&rc->reloc_root_tree);
- extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS);
+ extent_io_tree_init(&rc->processed_blocks, NULL);
return rc;
}
@@ -4109,7 +4112,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(root->fs_info->tree_root, path);
+ btrfs_release_path(path);
if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
key.type != BTRFS_ROOT_ITEM_KEY)
@@ -4141,7 +4144,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
key.offset--;
}
- btrfs_release_path(root->fs_info->tree_root, path);
+ btrfs_release_path(path);
if (list_empty(&reloc_roots))
goto out;
@@ -4242,7 +4245,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
- disk_bytenr + len - 1, &list);
+ disk_bytenr + len - 1, &list, 0);
while (!list_empty(&list)) {
sums = list_entry(list.next, struct btrfs_ordered_sum, list);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6928bff62daa..ebe45443de06 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -22,53 +22,6 @@
#include "print-tree.h"
/*
- * search forward for a root, starting with objectid 'search_start'
- * if a root key is found, the objectid we find is filled into 'found_objectid'
- * and 0 is returned. < 0 is returned on error, 1 if there is nothing
- * left in the tree.
- */
-int btrfs_search_root(struct btrfs_root *root, u64 search_start,
- u64 *found_objectid)
-{
- struct btrfs_path *path;
- struct btrfs_key search_key;
- int ret;
-
- root = root->fs_info->tree_root;
- search_key.objectid = search_start;
- search_key.type = (u8)-1;
- search_key.offset = (u64)-1;
-
- path = btrfs_alloc_path();
- BUG_ON(!path);
-again:
- ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
- if (ret < 0)
- goto out;
- if (ret == 0) {
- ret = 1;
- goto out;
- }
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
- if (ret)
- goto out;
- }
- btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
- if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
- search_key.offset++;
- btrfs_release_path(root, path);
- goto again;
- }
- ret = 0;
- *found_objectid = search_key.objectid;
-
-out:
- btrfs_free_path(path);
- return ret;
-}
-
-/*
* lookup the root with the highest offset for a given objectid. The key we do
* find is copied into 'key'. If we find something return 0, otherwise 1, < 0
* on error.
@@ -230,7 +183,7 @@ again:
memcpy(&found_key, &key, sizeof(key));
key.offset++;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
dead_root =
btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
&found_key);
@@ -292,7 +245,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
}
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(tree_root, path);
+ btrfs_release_path(path);
if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
key.type != BTRFS_ORPHAN_ITEM_KEY)
@@ -385,18 +338,22 @@ again:
*sequence = btrfs_root_ref_sequence(leaf, ref);
ret = btrfs_del_item(trans, tree_root, path);
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ goto out;
+ }
} else
err = -ENOENT;
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
- btrfs_release_path(tree_root, path);
+ btrfs_release_path(path);
key.objectid = ref_id;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = root_id;
goto again;
}
+out:
btrfs_free_path(path);
return err;
}
@@ -463,7 +420,7 @@ again:
btrfs_mark_buffer_dirty(leaf);
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
- btrfs_release_path(tree_root, path);
+ btrfs_release_path(path);
key.objectid = ref_id;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = root_id;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
new file mode 100644
index 000000000000..6dfed0c27ac3
--- /dev/null
+++ b/fs/btrfs/scrub.c
@@ -0,0 +1,1369 @@
+/*
+ * Copyright (C) 2011 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "ctree.h"
+#include "volumes.h"
+#include "disk-io.h"
+#include "ordered-data.h"
+
+/*
+ * This is only the first step towards a full-featured scrub. It reads all
+ * extents and super blocks and verifies the checksums. In case a bad checksum
+ * is found or the extent cannot be read, good data will be written back if
+ * any can be found.
+ *
+ * Future enhancements:
+ * - To enhance the performance, better read-ahead strategies for the
+ * extent-tree can be employed.
+ * - In case an unrepairable extent is encountered, track which files are
+ * affected and report them
+ * - In case of a read error on files with nodatasum, map the file and read
+ * the extent to trigger a writeback of the good copy
+ * - track and record media errors, throw out bad devices
+ * - add a mode to also read unallocated space
+ * - make the prefetch cancellable
+ */
+
+struct scrub_bio;
+struct scrub_page;
+struct scrub_dev;
+static void scrub_bio_end_io(struct bio *bio, int err);
+static void scrub_checksum(struct btrfs_work *work);
+static int scrub_checksum_data(struct scrub_dev *sdev,
+ struct scrub_page *spag, void *buffer);
+static int scrub_checksum_tree_block(struct scrub_dev *sdev,
+ struct scrub_page *spag, u64 logical,
+ void *buffer);
+static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
+static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
+static void scrub_fixup_end_io(struct bio *bio, int err);
+static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
+ struct page *page);
+static void scrub_fixup(struct scrub_bio *sbio, int ix);
+
+#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */
+#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */
+
+struct scrub_page {
+ u64 flags; /* extent flags */
+ u64 generation;
+ u64 mirror_num;
+ int have_csum;
+ u8 csum[BTRFS_CSUM_SIZE];
+};
+
+struct scrub_bio {
+ int index;
+ struct scrub_dev *sdev;
+ struct bio *bio;
+ int err;
+ u64 logical;
+ u64 physical;
+ struct scrub_page spag[SCRUB_PAGES_PER_BIO];
+ u64 count;
+ int next_free;
+ struct btrfs_work work;
+};
+
+struct scrub_dev {
+ struct scrub_bio *bios[SCRUB_BIOS_PER_DEV];
+ struct btrfs_device *dev;
+ int first_free;
+ int curr;
+ atomic_t in_flight;
+ spinlock_t list_lock;
+ wait_queue_head_t list_wait;
+ u16 csum_size;
+ struct list_head csum_list;
+ atomic_t cancel_req;
+ int readonly;
+ /*
+ * statistics
+ */
+ struct btrfs_scrub_progress stat;
+ spinlock_t stat_lock;
+};
+
+static void scrub_free_csums(struct scrub_dev *sdev)
+{
+ while (!list_empty(&sdev->csum_list)) {
+ struct btrfs_ordered_sum *sum;
+ sum = list_first_entry(&sdev->csum_list,
+ struct btrfs_ordered_sum, list);
+ list_del(&sum->list);
+ kfree(sum);
+ }
+}
+
+static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
+{
+ int i;
+ int j;
+ struct page *last_page;
+
+ if (!sdev)
+ return;
+
+ for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
+ struct scrub_bio *sbio = sdev->bios[i];
+ struct bio *bio;
+
+ if (!sbio)
+ break;
+
+ bio = sbio->bio;
+ if (bio) {
+ last_page = NULL;
+ for (j = 0; j < bio->bi_vcnt; ++j) {
+ if (bio->bi_io_vec[j].bv_page == last_page)
+ continue;
+ last_page = bio->bi_io_vec[j].bv_page;
+ __free_page(last_page);
+ }
+ bio_put(bio);
+ }
+ kfree(sbio);
+ }
+
+ scrub_free_csums(sdev);
+ kfree(sdev);
+}
+
+static noinline_for_stack
+struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
+{
+ struct scrub_dev *sdev;
+ int i;
+ int j;
+ int ret;
+ struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
+ if (!sdev)
+ goto nomem;
+ sdev->dev = dev;
+ for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
+ struct bio *bio;
+ struct scrub_bio *sbio;
+
+ sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+ if (!sbio)
+ goto nomem;
+ sdev->bios[i] = sbio;
+
+ bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
+ if (!bio)
+ goto nomem;
+
+ sbio->index = i;
+ sbio->sdev = sdev;
+ sbio->bio = bio;
+ sbio->count = 0;
+ sbio->work.func = scrub_checksum;
+ bio->bi_private = sdev->bios[i];
+ bio->bi_end_io = scrub_bio_end_io;
+ bio->bi_sector = 0;
+ bio->bi_bdev = dev->bdev;
+ bio->bi_size = 0;
+
+ for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
+ struct page *page;
+ page = alloc_page(GFP_NOFS);
+ if (!page)
+ goto nomem;
+
+ ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (!ret)
+ goto nomem;
+ }
+ WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
+
+ if (i != SCRUB_BIOS_PER_DEV-1)
+ sdev->bios[i]->next_free = i + 1;
+ else
+ sdev->bios[i]->next_free = -1;
+ }
+ sdev->first_free = 0;
+ sdev->curr = -1;
+ atomic_set(&sdev->in_flight, 0);
+ atomic_set(&sdev->cancel_req, 0);
+ sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
+ INIT_LIST_HEAD(&sdev->csum_list);
+
+ spin_lock_init(&sdev->list_lock);
+ spin_lock_init(&sdev->stat_lock);
+ init_waitqueue_head(&sdev->list_wait);
+ return sdev;
+
+nomem:
+ scrub_free_dev(sdev);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * scrub_recheck_error gets called when either verification of the page
+ * failed or the bio failed to read, e.g. with EIO. In the latter case,
+ * recheck_error gets called for every page in the bio, even though only
+ * one may be bad
+ */
+static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
+{
+ if (sbio->err) {
+ if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
+ (sbio->physical + ix * PAGE_SIZE) >> 9,
+ sbio->bio->bi_io_vec[ix].bv_page) == 0) {
+ if (scrub_fixup_check(sbio, ix) == 0)
+ return;
+ }
+ }
+
+ scrub_fixup(sbio, ix);
+}
+
+static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
+{
+ int ret = 1;
+ struct page *page;
+ void *buffer;
+ u64 flags = sbio->spag[ix].flags;
+
+ page = sbio->bio->bi_io_vec[ix].bv_page;
+ buffer = kmap_atomic(page, KM_USER0);
+ if (flags & BTRFS_EXTENT_FLAG_DATA) {
+ ret = scrub_checksum_data(sbio->sdev,
+ sbio->spag + ix, buffer);
+ } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ ret = scrub_checksum_tree_block(sbio->sdev,
+ sbio->spag + ix,
+ sbio->logical + ix * PAGE_SIZE,
+ buffer);
+ } else {
+ WARN_ON(1);
+ }
+ kunmap_atomic(buffer, KM_USER0);
+
+ return ret;
+}
+
+static void scrub_fixup_end_io(struct bio *bio, int err)
+{
+ complete((struct completion *)bio->bi_private);
+}
+
+static void scrub_fixup(struct scrub_bio *sbio, int ix)
+{
+ struct scrub_dev *sdev = sbio->sdev;
+ struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+ struct btrfs_multi_bio *multi = NULL;
+ u64 logical = sbio->logical + ix * PAGE_SIZE;
+ u64 length;
+ int i;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(complete);
+
+ if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
+ (sbio->spag[ix].have_csum == 0)) {
+ /*
+ * nodatasum, don't try to fix anything
+ * FIXME: we can do better, open the inode and trigger a
+ * writeback
+ */
+ goto uncorrectable;
+ }
+
+ length = PAGE_SIZE;
+ ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
+ &multi, 0);
+ if (ret || !multi || length < PAGE_SIZE) {
+ printk(KERN_ERR
+ "scrub_fixup: btrfs_map_block failed us for %llu\n",
+ (unsigned long long)logical);
+ WARN_ON(1);
+ return;
+ }
+
+ if (multi->num_stripes == 1)
+ /* there aren't any replicas */
+ goto uncorrectable;
+
+ /*
+ * first find a good copy
+ */
+ for (i = 0; i < multi->num_stripes; ++i) {
+ if (i == sbio->spag[ix].mirror_num)
+ continue;
+
+ if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
+ multi->stripes[i].physical >> 9,
+ sbio->bio->bi_io_vec[ix].bv_page)) {
+ /* I/O-error, this is not a good copy */
+ continue;
+ }
+
+ if (scrub_fixup_check(sbio, ix) == 0)
+ break;
+ }
+ if (i == multi->num_stripes)
+ goto uncorrectable;
+
+ if (!sdev->readonly) {
+ /*
+ * bi_io_vec[ix].bv_page now contains good data, write it back
+ */
+ if (scrub_fixup_io(WRITE, sdev->dev->bdev,
+ (sbio->physical + ix * PAGE_SIZE) >> 9,
+ sbio->bio->bi_io_vec[ix].bv_page)) {
+ /* I/O-error, writeback failed, give up */
+ goto uncorrectable;
+ }
+ }
+
+ kfree(multi);
+ spin_lock(&sdev->stat_lock);
+ ++sdev->stat.corrected_errors;
+ spin_unlock(&sdev->stat_lock);
+
+ if (printk_ratelimit())
+ printk(KERN_ERR "btrfs: fixed up at %llu\n",
+ (unsigned long long)logical);
+ return;
+
+uncorrectable:
+ kfree(multi);
+ spin_lock(&sdev->stat_lock);
+ ++sdev->stat.uncorrectable_errors;
+ spin_unlock(&sdev->stat_lock);
+
+ if (printk_ratelimit())
+ printk(KERN_ERR "btrfs: unable to fixup at %llu\n",
+ (unsigned long long)logical);
+}
+
+static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
+ struct page *page)
+{
+ struct bio *bio = NULL;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(complete);
+
+ /* we are going to wait on this IO */
+ rw |= REQ_SYNC;
+
+ bio = bio_alloc(GFP_NOFS, 1);
+ bio->bi_bdev = bdev;
+ bio->bi_sector = sector;
+ bio_add_page(bio, page, PAGE_SIZE, 0);
+ bio->bi_end_io = scrub_fixup_end_io;
+ bio->bi_private = &complete;
+ submit_bio(rw, bio);
+
+ wait_for_completion(&complete);
+
+ ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio_put(bio);
+ return ret;
+}
+
+static void scrub_bio_end_io(struct bio *bio, int err)
+{
+ struct scrub_bio *sbio = bio->bi_private;
+ struct scrub_dev *sdev = sbio->sdev;
+ struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+
+ sbio->err = err;
+
+ btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+}
+
+static void scrub_checksum(struct btrfs_work *work)
+{
+ struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
+ struct scrub_dev *sdev = sbio->sdev;
+ struct page *page;
+ void *buffer;
+ int i;
+ u64 flags;
+ u64 logical;
+ int ret;
+
+ if (sbio->err) {
+ for (i = 0; i < sbio->count; ++i)
+ scrub_recheck_error(sbio, i);
+
+ sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+ sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
+ sbio->bio->bi_phys_segments = 0;
+ sbio->bio->bi_idx = 0;
+
+ for (i = 0; i < sbio->count; i++) {
+ struct bio_vec *bi;
+ bi = &sbio->bio->bi_io_vec[i];
+ bi->bv_offset = 0;
+ bi->bv_len = PAGE_SIZE;
+ }
+
+ spin_lock(&sdev->stat_lock);
+ ++sdev->stat.read_errors;
+ spin_unlock(&sdev->stat_lock);
+ goto out;
+ }
+ for (i = 0; i < sbio->count; ++i) {
+ page = sbio->bio->bi_io_vec[i].bv_page;
+ buffer = kmap_atomic(page, KM_USER0);
+ flags = sbio->spag[i].flags;
+ logical = sbio->logical + i * PAGE_SIZE;
+ ret = 0;
+ if (flags & BTRFS_EXTENT_FLAG_DATA) {
+ ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
+ } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
+ logical, buffer);
+ } else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
+ BUG_ON(i);
+ (void)scrub_checksum_super(sbio, buffer);
+ } else {
+ WARN_ON(1);
+ }
+ kunmap_atomic(buffer, KM_USER0);
+ if (ret)
+ scrub_recheck_error(sbio, i);
+ }
+
+out:
+ spin_lock(&sdev->list_lock);
+ sbio->next_free = sdev->first_free;
+ sdev->first_free = sbio->index;
+ spin_unlock(&sdev->list_lock);
+ atomic_dec(&sdev->in_flight);
+ wake_up(&sdev->list_wait);
+}
+
+static int scrub_checksum_data(struct scrub_dev *sdev,
+ struct scrub_page *spag, void *buffer)
+{
+ u8 csum[BTRFS_CSUM_SIZE];
+ u32 crc = ~(u32)0;
+ int fail = 0;
+ struct btrfs_root *root = sdev->dev->dev_root;
+
+ if (!spag->have_csum)
+ return 0;
+
+ crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
+ btrfs_csum_final(crc, csum);
+ if (memcmp(csum, spag->csum, sdev->csum_size))
+ fail = 1;
+
+ spin_lock(&sdev->stat_lock);
+ ++sdev->stat.data_extents_scrubbed;
+ sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
+ if (fail)
+ ++sdev->stat.csum_errors;
+ spin_unlock(&sdev->stat_lock);
+
+ return fail;
+}
+
+static int scrub_checksum_tree_block(struct scrub_dev *sdev,
+ struct scrub_page *spag, u64 logical,
+ void *buffer)
+{
+ struct btrfs_header *h;
+ struct btrfs_root *root = sdev->dev->dev_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u8 csum[BTRFS_CSUM_SIZE];
+ u32 crc = ~(u32)0;
+ int fail = 0;
+ int crc_fail = 0;
+
+ /*
+ * we don't use the getter functions here, as we
+ * a) don't have an extent buffer and
+ * b) the page is already kmapped
+ */
+ h = (struct btrfs_header *)buffer;
+
+ if (logical != le64_to_cpu(h->bytenr))
+ ++fail;
+
+ if (spag->generation != le64_to_cpu(h->generation))
+ ++fail;
+
+ if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
+ ++fail;
+
+ if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+ BTRFS_UUID_SIZE))
+ ++fail;
+
+ crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
+ PAGE_SIZE - BTRFS_CSUM_SIZE);
+ btrfs_csum_final(crc, csum);
+ if (memcmp(csum, h->csum, sdev->csum_size))
+ ++crc_fail;
+
+ spin_lock(&sdev->stat_lock);
+ ++sdev->stat.tree_extents_scrubbed;
+ sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
+ if (crc_fail)
+ ++sdev->stat.csum_errors;
+ if (fail)
+ ++sdev->stat.verify_errors;
+ spin_unlock(&sdev->stat_lock);
+
+ return fail || crc_fail;
+}
+
+static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
+{
+ struct btrfs_super_block *s;
+ u64 logical;
+ struct scrub_dev *sdev = sbio->sdev;
+ struct btrfs_root *root = sdev->dev->dev_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u8 csum[BTRFS_CSUM_SIZE];
+ u32 crc = ~(u32)0;
+ int fail = 0;
+
+ s = (struct btrfs_super_block *)buffer;
+ logical = sbio->logical;
+
+ if (logical != le64_to_cpu(s->bytenr))
+ ++fail;
+
+ if (sbio->spag[0].generation != le64_to_cpu(s->generation))
+ ++fail;
+
+ if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
+ ++fail;
+
+ crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
+ PAGE_SIZE - BTRFS_CSUM_SIZE);
+ btrfs_csum_final(crc, csum);
+ if (memcmp(csum, s->csum, sbio->sdev->csum_size))
+ ++fail;
+
+ if (fail) {
+ /*
+ * if we find an error in a super block, we just report it;
+ * super blocks get rewritten with the next transaction commit
+ * anyway
+ */
+ spin_lock(&sdev->stat_lock);
+ ++sdev->stat.super_errors;
+ spin_unlock(&sdev->stat_lock);
+ }
+
+ return fail;
+}
+
+static int scrub_submit(struct scrub_dev *sdev)
+{
+ struct scrub_bio *sbio;
+
+ if (sdev->curr == -1)
+ return 0;
+
+ sbio = sdev->bios[sdev->curr];
+
+ sbio->bio->bi_sector = sbio->physical >> 9;
+ sbio->bio->bi_size = sbio->count * PAGE_SIZE;
+ sbio->bio->bi_next = NULL;
+ sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
+ sbio->bio->bi_comp_cpu = -1;
+ sbio->bio->bi_bdev = sdev->dev->bdev;
+ sbio->err = 0;
+ sdev->curr = -1;
+ atomic_inc(&sdev->in_flight);
+
+ submit_bio(0, sbio->bio);
+
+ return 0;
+}
+
+static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
+ u64 physical, u64 flags, u64 gen, u64 mirror_num,
+ u8 *csum, int force)
+{
+ struct scrub_bio *sbio;
+
+again:
+ /*
+ * grab a fresh bio or wait for one to become available
+ */
+ while (sdev->curr == -1) {
+ spin_lock(&sdev->list_lock);
+ sdev->curr = sdev->first_free;
+ if (sdev->curr != -1) {
+ sdev->first_free = sdev->bios[sdev->curr]->next_free;
+ sdev->bios[sdev->curr]->next_free = -1;
+ sdev->bios[sdev->curr]->count = 0;
+ spin_unlock(&sdev->list_lock);
+ } else {
+ spin_unlock(&sdev->list_lock);
+ wait_event(sdev->list_wait, sdev->first_free != -1);
+ }
+ }
+ sbio = sdev->bios[sdev->curr];
+ if (sbio->count == 0) {
+ sbio->physical = physical;
+ sbio->logical = logical;
+ } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
+ sbio->logical + sbio->count * PAGE_SIZE != logical) {
+ scrub_submit(sdev);
+ goto again;
+ }
+ sbio->spag[sbio->count].flags = flags;
+ sbio->spag[sbio->count].generation = gen;
+ sbio->spag[sbio->count].have_csum = 0;
+ sbio->spag[sbio->count].mirror_num = mirror_num;
+ if (csum) {
+ sbio->spag[sbio->count].have_csum = 1;
+ memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
+ }
+ ++sbio->count;
+ if (sbio->count == SCRUB_PAGES_PER_BIO || force)
+ scrub_submit(sdev);
+
+ return 0;
+}
+
+static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
+ u8 *csum)
+{
+ struct btrfs_ordered_sum *sum = NULL;
+ int ret = 0;
+ unsigned long i;
+ unsigned long num_sectors;
+ u32 sectorsize = sdev->dev->dev_root->sectorsize;
+
+ while (!list_empty(&sdev->csum_list)) {
+ sum = list_first_entry(&sdev->csum_list,
+ struct btrfs_ordered_sum, list);
+ if (sum->bytenr > logical)
+ return 0;
+ if (sum->bytenr + sum->len > logical)
+ break;
+
+ ++sdev->stat.csum_discards;
+ list_del(&sum->list);
+ kfree(sum);
+ sum = NULL;
+ }
+ if (!sum)
+ return 0;
+
+ num_sectors = sum->len / sectorsize;
+ for (i = 0; i < num_sectors; ++i) {
+ if (sum->sums[i].bytenr == logical) {
+ memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
+ ret = 1;
+ break;
+ }
+ }
+ if (ret && i == num_sectors - 1) {
+ list_del(&sum->list);
+ kfree(sum);
+ }
+ return ret;
+}
+
+/* scrub extent tries to collect up to 64 kB for each bio */
+static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
+ u64 physical, u64 flags, u64 gen, u64 mirror_num)
+{
+ int ret;
+ u8 csum[BTRFS_CSUM_SIZE];
+
+ while (len) {
+ u64 l = min_t(u64, len, PAGE_SIZE);
+ int have_csum = 0;
+
+ if (flags & BTRFS_EXTENT_FLAG_DATA) {
+ /* push csums to sbio */
+ have_csum = scrub_find_csum(sdev, logical, l, csum);
+ if (have_csum == 0)
+ ++sdev->stat.no_csum;
+ }
+ ret = scrub_page(sdev, logical, l, physical, flags, gen,
+ mirror_num, have_csum ? csum : NULL, 0);
+ if (ret)
+ return ret;
+ len -= l;
+ logical += l;
+ physical += l;
+ }
+ return 0;
+}
+
+static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
+ struct map_lookup *map, int num, u64 base, u64 length)
+{
+ struct btrfs_path *path;
+ struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+ struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *csum_root = fs_info->csum_root;
+ struct btrfs_extent_item *extent;
+ u64 flags;
+ int ret;
+ int slot;
+ int i;
+ u64 nstripes;
+ int start_stripe;
+ struct extent_buffer *l;
+ struct btrfs_key key;
+ u64 physical;
+ u64 logical;
+ u64 generation;
+ u64 mirror_num;
+
+ u64 increment = map->stripe_len;
+ u64 offset;
+
+ nstripes = length;
+ offset = 0;
+ do_div(nstripes, map->stripe_len);
+ if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+ offset = map->stripe_len * num;
+ increment = map->stripe_len * map->num_stripes;
+ mirror_num = 0;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+ int factor = map->num_stripes / map->sub_stripes;
+ offset = map->stripe_len * (num / map->sub_stripes);
+ increment = map->stripe_len * factor;
+ mirror_num = num % map->sub_stripes;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
+ increment = map->stripe_len;
+ mirror_num = num % map->num_stripes;
+ } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
+ increment = map->stripe_len;
+ mirror_num = num % map->num_stripes;
+ } else {
+ increment = map->stripe_len;
+ mirror_num = 0;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ path->reada = 2;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+
+ /*
+ * find all extents for each stripe and just read them to get
+ * them into the page cache
+ * FIXME: we can do better. Build more intelligent prefetching.
+ */
+ logical = base + offset;
+ physical = map->stripes[num].physical;
+ ret = 0;
+ for (i = 0; i < nstripes; ++i) {
+ key.objectid = logical;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = (u64)0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(l, &key, slot);
+ if (key.objectid != logical) {
+ ret = btrfs_previous_item(root, path, 0,
+ BTRFS_EXTENT_ITEM_KEY);
+ if (ret < 0)
+ goto out;
+ }
+
+ while (1) {
+ l = path->nodes[0];
+ slot = path->slots[0];
+ if (slot >= btrfs_header_nritems(l)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret == 0)
+ continue;
+ if (ret < 0)
+ goto out;
+
+ break;
+ }
+ btrfs_item_key_to_cpu(l, &key, slot);
+
+ if (key.objectid >= logical + map->stripe_len)
+ break;
+
+ path->slots[0]++;
+ }
+ btrfs_release_path(path);
+ logical += increment;
+ physical += map->stripe_len;
+ cond_resched();
+ }
+
+ /*
+ * collect all data csums for the stripe to avoid seeking during
+ * the scrub. This might currently (crc32) end up being about 1 MB
+ */
+ start_stripe = 0;
+again:
+ logical = base + offset + start_stripe * increment;
+ for (i = start_stripe; i < nstripes; ++i) {
+ ret = btrfs_lookup_csums_range(csum_root, logical,
+ logical + map->stripe_len - 1,
+ &sdev->csum_list, 1);
+ if (ret)
+ goto out;
+
+ logical += increment;
+ cond_resched();
+ }
+ /*
+ * now find all extents for each stripe and scrub them
+ */
+ logical = base + offset + start_stripe * increment;
+ physical = map->stripes[num].physical + start_stripe * map->stripe_len;
+ ret = 0;
+ for (i = start_stripe; i < nstripes; ++i) {
+ /*
+ * canceled?
+ */
+ if (atomic_read(&fs_info->scrub_cancel_req) ||
+ atomic_read(&sdev->cancel_req)) {
+ ret = -ECANCELED;
+ goto out;
+ }
+ /*
+ * check to see if we have to pause
+ */
+ if (atomic_read(&fs_info->scrub_pause_req)) {
+ /* push queued extents */
+ scrub_submit(sdev);
+ wait_event(sdev->list_wait,
+ atomic_read(&sdev->in_flight) == 0);
+ atomic_inc(&fs_info->scrubs_paused);
+ wake_up(&fs_info->scrub_pause_wait);
+ mutex_lock(&fs_info->scrub_lock);
+ while (atomic_read(&fs_info->scrub_pause_req)) {
+ mutex_unlock(&fs_info->scrub_lock);
+ wait_event(fs_info->scrub_pause_wait,
+ atomic_read(&fs_info->scrub_pause_req) == 0);
+ mutex_lock(&fs_info->scrub_lock);
+ }
+ atomic_dec(&fs_info->scrubs_paused);
+ mutex_unlock(&fs_info->scrub_lock);
+ wake_up(&fs_info->scrub_pause_wait);
+ scrub_free_csums(sdev);
+ start_stripe = i;
+ goto again;
+ }
+
+ key.objectid = logical;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = (u64)0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(l, &key, slot);
+ if (key.objectid != logical) {
+ ret = btrfs_previous_item(root, path, 0,
+ BTRFS_EXTENT_ITEM_KEY);
+ if (ret < 0)
+ goto out;
+ }
+
+ while (1) {
+ l = path->nodes[0];
+ slot = path->slots[0];
+ if (slot >= btrfs_header_nritems(l)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret == 0)
+ continue;
+ if (ret < 0)
+ goto out;
+
+ break;
+ }
+ btrfs_item_key_to_cpu(l, &key, slot);
+
+ if (key.objectid + key.offset <= logical)
+ goto next;
+
+ if (key.objectid >= logical + map->stripe_len)
+ break;
+
+ if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
+ goto next;
+
+ extent = btrfs_item_ptr(l, slot,
+ struct btrfs_extent_item);
+ flags = btrfs_extent_flags(l, extent);
+ generation = btrfs_extent_generation(l, extent);
+
+ if (key.objectid < logical &&
+ (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
+ printk(KERN_ERR
+ "btrfs scrub: tree block %llu spanning "
+ "stripes, ignored. logical=%llu\n",
+ (unsigned long long)key.objectid,
+ (unsigned long long)logical);
+ goto next;
+ }
+
+ /*
+ * trim extent to this stripe
+ */
+ if (key.objectid < logical) {
+ key.offset -= logical - key.objectid;
+ key.objectid = logical;
+ }
+ if (key.objectid + key.offset >
+ logical + map->stripe_len) {
+ key.offset = logical + map->stripe_len -
+ key.objectid;
+ }
+
+ ret = scrub_extent(sdev, key.objectid, key.offset,
+ key.objectid - logical + physical,
+ flags, generation, mirror_num);
+ if (ret)
+ goto out;
+
+next:
+ path->slots[0]++;
+ }
+ btrfs_release_path(path);
+ logical += increment;
+ physical += map->stripe_len;
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.last_physical = physical;
+ spin_unlock(&sdev->stat_lock);
+ }
+ /* push queued extents */
+ scrub_submit(sdev);
+
+out:
+ btrfs_free_path(path);
+ return ret < 0 ? ret : 0;
+}
+
+static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
+ u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
+{
+ struct btrfs_mapping_tree *map_tree =
+ &sdev->dev->dev_root->fs_info->mapping_tree;
+ struct map_lookup *map;
+ struct extent_map *em;
+ int i;
+ int ret = -EINVAL;
+
+ read_lock(&map_tree->map_tree.lock);
+ em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
+ read_unlock(&map_tree->map_tree.lock);
+
+ if (!em)
+ return -EINVAL;
+
+ map = (struct map_lookup *)em->bdev;
+ if (em->start != chunk_offset)
+ goto out;
+
+ if (em->len < length)
+ goto out;
+
+ for (i = 0; i < map->num_stripes; ++i) {
+ if (map->stripes[i].dev == sdev->dev) {
+ ret = scrub_stripe(sdev, map, i, chunk_offset, length);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ free_extent_map(em);
+
+ return ret;
+}
+
+static noinline_for_stack
+int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
+{
+ struct btrfs_dev_extent *dev_extent = NULL;
+ struct btrfs_path *path;
+ struct btrfs_root *root = sdev->dev->dev_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 length;
+ u64 chunk_tree;
+ u64 chunk_objectid;
+ u64 chunk_offset;
+ int ret;
+ int slot;
+ struct extent_buffer *l;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_block_group_cache *cache;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ path->reada = 2;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+
+ key.objectid = sdev->dev->devid;
+ key.offset = 0ull;
+ key.type = BTRFS_DEV_EXTENT_KEY;
+
+
+ while (1) {
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+
+ btrfs_item_key_to_cpu(l, &found_key, slot);
+
+ if (found_key.objectid != sdev->dev->devid)
+ break;
+
+ if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+ break;
+
+ if (found_key.offset >= end)
+ break;
+
+ if (found_key.offset < key.offset)
+ break;
+
+ dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
+ length = btrfs_dev_extent_length(l, dev_extent);
+
+ if (found_key.offset + length <= start) {
+ key.offset = found_key.offset + length;
+ btrfs_release_path(path);
+ continue;
+ }
+
+ chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
+ chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
+ chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
+
+ /*
+ * get a reference on the corresponding block group to prevent
+ * the chunk from going away while we scrub it
+ */
+ cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+ if (!cache) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
+ chunk_offset, length);
+ btrfs_put_block_group(cache);
+ if (ret)
+ break;
+
+ key.offset = found_key.offset + length;
+ btrfs_release_path(path);
+ }
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
+{
+ int i;
+ u64 bytenr;
+ u64 gen;
+ int ret;
+ struct btrfs_device *device = sdev->dev;
+ struct btrfs_root *root = device->dev_root;
+
+ gen = root->fs_info->last_trans_committed;
+
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+ break;
+
+ ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
+ BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
+ if (ret)
+ return ret;
+ }
+ wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+
+ return 0;
+}
+
+/*
+ * get a reference count on fs_info->scrub_workers. start worker if necessary
+ */
+static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ mutex_lock(&fs_info->scrub_lock);
+ if (fs_info->scrub_workers_refcnt == 0)
+ btrfs_start_workers(&fs_info->scrub_workers, 1);
+ ++fs_info->scrub_workers_refcnt;
+ mutex_unlock(&fs_info->scrub_lock);
+
+ return 0;
+}
+
+static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ mutex_lock(&fs_info->scrub_lock);
+ if (--fs_info->scrub_workers_refcnt == 0)
+ btrfs_stop_workers(&fs_info->scrub_workers);
+ WARN_ON(fs_info->scrub_workers_refcnt < 0);
+ mutex_unlock(&fs_info->scrub_lock);
+}
+
+
+int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
+ struct btrfs_scrub_progress *progress, int readonly)
+{
+ struct scrub_dev *sdev;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
+ struct btrfs_device *dev;
+
+ if (root->fs_info->closing)
+ return -EINVAL;
+
+ /*
+ * check some assumptions
+ */
+ if (root->sectorsize != PAGE_SIZE ||
+ root->sectorsize != root->leafsize ||
+ root->sectorsize != root->nodesize) {
+ printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
+ return -EINVAL;
+ }
+
+ ret = scrub_workers_get(root);
+ if (ret)
+ return ret;
+
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ dev = btrfs_find_device(root, devid, NULL, NULL);
+ if (!dev || dev->missing) {
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(root);
+ return -ENODEV;
+ }
+ mutex_lock(&fs_info->scrub_lock);
+
+ if (!dev->in_fs_metadata) {
+ mutex_unlock(&fs_info->scrub_lock);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(root);
+ return -ENODEV;
+ }
+
+ if (dev->scrub_device) {
+ mutex_unlock(&fs_info->scrub_lock);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(root);
+ return -EINPROGRESS;
+ }
+ sdev = scrub_setup_dev(dev);
+ if (IS_ERR(sdev)) {
+ mutex_unlock(&fs_info->scrub_lock);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(root);
+ return PTR_ERR(sdev);
+ }
+ sdev->readonly = readonly;
+ dev->scrub_device = sdev;
+
+ atomic_inc(&fs_info->scrubs_running);
+ mutex_unlock(&fs_info->scrub_lock);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
+ down_read(&fs_info->scrub_super_lock);
+ ret = scrub_supers(sdev);
+ up_read(&fs_info->scrub_super_lock);
+
+ if (!ret)
+ ret = scrub_enumerate_chunks(sdev, start, end);
+
+ wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+
+ atomic_dec(&fs_info->scrubs_running);
+ wake_up(&fs_info->scrub_pause_wait);
+
+ if (progress)
+ memcpy(progress, &sdev->stat, sizeof(*progress));
+
+ mutex_lock(&fs_info->scrub_lock);
+ dev->scrub_device = NULL;
+ mutex_unlock(&fs_info->scrub_lock);
+
+ scrub_free_dev(sdev);
+ scrub_workers_put(root);
+
+ return ret;
+}
+
+int btrfs_scrub_pause(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ mutex_lock(&fs_info->scrub_lock);
+ atomic_inc(&fs_info->scrub_pause_req);
+ while (atomic_read(&fs_info->scrubs_paused) !=
+ atomic_read(&fs_info->scrubs_running)) {
+ mutex_unlock(&fs_info->scrub_lock);
+ wait_event(fs_info->scrub_pause_wait,
+ atomic_read(&fs_info->scrubs_paused) ==
+ atomic_read(&fs_info->scrubs_running));
+ mutex_lock(&fs_info->scrub_lock);
+ }
+ mutex_unlock(&fs_info->scrub_lock);
+
+ return 0;
+}
+
+int btrfs_scrub_continue(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ atomic_dec(&fs_info->scrub_pause_req);
+ wake_up(&fs_info->scrub_pause_wait);
+ return 0;
+}
+
+int btrfs_scrub_pause_super(struct btrfs_root *root)
+{
+ down_write(&root->fs_info->scrub_super_lock);
+ return 0;
+}
+
+int btrfs_scrub_continue_super(struct btrfs_root *root)
+{
+ up_write(&root->fs_info->scrub_super_lock);
+ return 0;
+}
+
+int btrfs_scrub_cancel(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ mutex_lock(&fs_info->scrub_lock);
+ if (!atomic_read(&fs_info->scrubs_running)) {
+ mutex_unlock(&fs_info->scrub_lock);
+ return -ENOTCONN;
+ }
+
+ atomic_inc(&fs_info->scrub_cancel_req);
+ while (atomic_read(&fs_info->scrubs_running)) {
+ mutex_unlock(&fs_info->scrub_lock);
+ wait_event(fs_info->scrub_pause_wait,
+ atomic_read(&fs_info->scrubs_running) == 0);
+ mutex_lock(&fs_info->scrub_lock);
+ }
+ atomic_dec(&fs_info->scrub_cancel_req);
+ mutex_unlock(&fs_info->scrub_lock);
+
+ return 0;
+}
+
+int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct scrub_dev *sdev;
+
+ mutex_lock(&fs_info->scrub_lock);
+ sdev = dev->scrub_device;
+ if (!sdev) {
+ mutex_unlock(&fs_info->scrub_lock);
+ return -ENOTCONN;
+ }
+ atomic_inc(&sdev->cancel_req);
+ while (dev->scrub_device) {
+ mutex_unlock(&fs_info->scrub_lock);
+ wait_event(fs_info->scrub_pause_wait,
+ dev->scrub_device == NULL);
+ mutex_lock(&fs_info->scrub_lock);
+ }
+ mutex_unlock(&fs_info->scrub_lock);
+
+ return 0;
+}
+int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_device *dev;
+ int ret;
+
+ /*
+ * we have to hold the device_list_mutex here so the device
+ * does not go away in cancel_dev. FIXME: find a better solution
+ */
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ dev = btrfs_find_device(root, devid, NULL, NULL);
+ if (!dev) {
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ return -ENODEV;
+ }
+ ret = btrfs_scrub_cancel_dev(root, dev);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+ return ret;
+}
+
+int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+ struct btrfs_scrub_progress *progress)
+{
+ struct btrfs_device *dev;
+ struct scrub_dev *sdev = NULL;
+
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ dev = btrfs_find_device(root, devid, NULL, NULL);
+ if (dev)
+ sdev = dev->scrub_device;
+ if (sdev)
+ memcpy(progress, &sdev->stat, sizeof(*progress));
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
+ return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
+}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index be4ffa12f3ef..9b2e7e5bc3ef 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/cleancache.h>
#include "compat.h"
+#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -160,7 +161,7 @@ enum {
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
- Opt_enospc_debug, Opt_subvolrootid, Opt_err,
+ Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err,
};
static match_table_t tokens = {
@@ -191,6 +192,7 @@ static match_table_t tokens = {
{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
{Opt_enospc_debug, "enospc_debug"},
{Opt_subvolrootid, "subvolrootid=%d"},
+ {Opt_defrag, "autodefrag"},
{Opt_err, NULL},
};
@@ -369,6 +371,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_enospc_debug:
btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
break;
+ case Opt_defrag:
+ printk(KERN_INFO "btrfs: enabling auto defrag\n");
+ btrfs_set_opt(info->mount_opt, AUTO_DEFRAG);
+ break;
case Opt_err:
printk(KERN_INFO "btrfs: unrecognized mount option "
"'%s'\n", p);
@@ -507,8 +513,10 @@ static struct dentry *get_default_root(struct super_block *sb,
*/
dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
- if (IS_ERR(di))
+ if (IS_ERR(di)) {
+ btrfs_free_path(path);
return ERR_CAST(di);
+ }
if (!di) {
/*
* Ok the default dir item isn't there. This is weird since
@@ -741,7 +749,7 @@ static int btrfs_set_super(struct super_block *s, void *data)
* for multiple device setup. Make sure to keep it in sync.
*/
static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+ const char *device_name, void *data)
{
struct block_device *bdev = NULL;
struct super_block *s;
@@ -764,7 +772,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (error)
return ERR_PTR(error);
- error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
+ error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
if (error)
goto error_free_subvol_name;
@@ -915,6 +923,32 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
return 0;
}
+/* Used to sort the devices by max_avail (descending sort) */
+static int btrfs_cmp_device_free_bytes(const void *dev_info1,
+ const void *dev_info2)
+{
+ if (((struct btrfs_device_info *)dev_info1)->max_avail >
+ ((struct btrfs_device_info *)dev_info2)->max_avail)
+ return -1;
+ else if (((struct btrfs_device_info *)dev_info1)->max_avail <
+ ((struct btrfs_device_info *)dev_info2)->max_avail)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * sort the devices by max_avail, which holds the max free extent size of
+ * each device (descending sort)
+ */
+static inline void btrfs_descending_sort_devices(
+ struct btrfs_device_info *devices,
+ size_t nr_devices)
+{
+ sort(devices, nr_devices, sizeof(struct btrfs_device_info),
+ btrfs_cmp_device_free_bytes, NULL);
+}
+
/*
* The helper to calc the free space on the devices that can be used to store
* file data.
@@ -1208,10 +1242,14 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_extent_io;
- err = btrfs_interface_init();
+ err = btrfs_delayed_inode_init();
if (err)
goto free_extent_map;
+ err = btrfs_interface_init();
+ if (err)
+ goto free_delayed_inode;
+
err = register_filesystem(&btrfs_fs_type);
if (err)
goto unregister_ioctl;
@@ -1221,6 +1259,8 @@ static int __init init_btrfs_fs(void)
unregister_ioctl:
btrfs_interface_exit();
+free_delayed_inode:
+ btrfs_delayed_inode_exit();
free_extent_map:
extent_map_exit();
free_extent_io:
@@ -1237,6 +1277,7 @@ free_sysfs:
static void __exit exit_btrfs_fs(void)
{
btrfs_destroy_cachep();
+ btrfs_delayed_inode_exit();
extent_map_exit();
extent_io_exit();
btrfs_interface_exit();
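btrfs_cmp_device_free_bytes() above follows the usual comparator contract: return a negative value when the first element should sort earlier, so answering -1 for the larger max_avail yields the descending order the allocator wants. A userspace analogue with qsort(), using a simplified stand-in struct rather than the real struct btrfs_device_info (assumed here only for illustration):

/* Illustrative sketch only: descending sort by max_avail via qsort(),
 * mirroring the comparator contract used by the kernel's sort().
 */
#include <stdio.h>
#include <stdlib.h>

struct dev_info {			/* stand-in for struct btrfs_device_info */
	unsigned long long max_avail;	/* max free extent size on the device */
};

static int cmp_free_bytes_desc(const void *a, const void *b)
{
	const struct dev_info *d1 = a;
	const struct dev_info *d2 = b;

	if (d1->max_avail > d2->max_avail)
		return -1;		/* more free space sorts first */
	if (d1->max_avail < d2->max_avail)
		return 1;
	return 0;
}

int main(void)
{
	struct dev_info devs[] = { { 10 }, { 300 }, { 42 } };
	size_t i, n = sizeof(devs) / sizeof(devs[0]);

	qsort(devs, n, sizeof(devs[0]), cmp_free_bytes_desc);
	for (i = 0; i < n; i++)
		printf("%llu\n", devs[i].max_avail);	/* prints 300, 42, 10 */
	return 0;
}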
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 4ce16ef702a3..c3c223ae6691 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -174,86 +174,9 @@ static const struct sysfs_ops btrfs_root_attr_ops = {
.store = btrfs_root_attr_store,
};
-static struct kobj_type btrfs_root_ktype = {
- .default_attrs = btrfs_root_attrs,
- .sysfs_ops = &btrfs_root_attr_ops,
- .release = btrfs_root_release,
-};
-
-static struct kobj_type btrfs_super_ktype = {
- .default_attrs = btrfs_super_attrs,
- .sysfs_ops = &btrfs_super_attr_ops,
- .release = btrfs_super_release,
-};
-
/* /sys/fs/btrfs/ entry */
static struct kset *btrfs_kset;
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
-{
- int error;
- char *name;
- char c;
- int len = strlen(fs->sb->s_id) + 1;
- int i;
-
- name = kmalloc(len, GFP_NOFS);
- if (!name) {
- error = -ENOMEM;
- goto fail;
- }
-
- for (i = 0; i < len; i++) {
- c = fs->sb->s_id[i];
- if (c == '/' || c == '\\')
- c = '!';
- name[i] = c;
- }
- name[len] = '\0';
-
- fs->super_kobj.kset = btrfs_kset;
- error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
- NULL, "%s", name);
- kfree(name);
- if (error)
- goto fail;
-
- return 0;
-
-fail:
- printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
- return error;
-}
-
-int btrfs_sysfs_add_root(struct btrfs_root *root)
-{
- int error;
-
- error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
- &root->fs_info->super_kobj,
- "%s", root->name);
- if (error)
- goto fail;
-
- return 0;
-
-fail:
- printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
- return error;
-}
-
-void btrfs_sysfs_del_root(struct btrfs_root *root)
-{
- kobject_put(&root->root_kobj);
- wait_for_completion(&root->kobj_unregister);
-}
-
-void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
-{
- kobject_put(&fs->super_kobj);
- wait_for_completion(&fs->kobj_unregister);
-}
-
int btrfs_init_sysfs(void)
{
btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c571734d5e5a..dc80f7156923 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -27,6 +27,7 @@
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
+#include "inode-map.h"
#define BTRFS_ROOT_TRANS_TAG 0
@@ -80,8 +81,7 @@ static noinline int join_transaction(struct btrfs_root *root)
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
- root->fs_info->btree_inode->i_mapping,
- GFP_NOFS);
+ root->fs_info->btree_inode->i_mapping);
spin_lock(&root->fs_info->new_trans_lock);
root->fs_info->running_transaction = cur_trans;
spin_unlock(&root->fs_info->new_trans_lock);
@@ -347,49 +347,6 @@ out_unlock:
return ret;
}
-#if 0
-/*
- * rate limit against the drop_snapshot code. This helps to slow down new
- * operations if the drop_snapshot code isn't able to keep up.
- */
-static void throttle_on_drops(struct btrfs_root *root)
-{
- struct btrfs_fs_info *info = root->fs_info;
- int harder_count = 0;
-
-harder:
- if (atomic_read(&info->throttles)) {
- DEFINE_WAIT(wait);
- int thr;
- thr = atomic_read(&info->throttle_gen);
-
- do {
- prepare_to_wait(&info->transaction_throttle,
- &wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&info->throttles)) {
- finish_wait(&info->transaction_throttle, &wait);
- break;
- }
- schedule();
- finish_wait(&info->transaction_throttle, &wait);
- } while (thr == atomic_read(&info->throttle_gen));
- harder_count++;
-
- if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
- harder_count < 2)
- goto harder;
-
- if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
- harder_count < 10)
- goto harder;
-
- if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
- harder_count < 20)
- goto harder;
- }
-}
-#endif
-
void btrfs_throttle(struct btrfs_root *root)
{
mutex_lock(&root->fs_info->trans_mutex);
@@ -487,19 +444,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- return __btrfs_end_transaction(trans, root, 0, 1);
+ int ret;
+
+ ret = __btrfs_end_transaction(trans, root, 0, 1);
+ if (ret)
+ return ret;
+ return 0;
}
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- return __btrfs_end_transaction(trans, root, 1, 1);
+ int ret;
+
+ ret = __btrfs_end_transaction(trans, root, 1, 1);
+ if (ret)
+ return ret;
+ return 0;
}
int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- return __btrfs_end_transaction(trans, root, 0, 0);
+ int ret;
+
+ ret = __btrfs_end_transaction(trans, root, 0, 0);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ return __btrfs_end_transaction(trans, root, 1, 1);
}
/*
@@ -760,8 +738,14 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
btrfs_update_reloc_root(trans, root);
btrfs_orphan_commit_root(trans, root);
+ btrfs_save_ino_cache(root, trans);
+
if (root->commit_root != root->node) {
+ mutex_lock(&root->fs_commit_mutex);
switch_commit_root(root);
+ btrfs_unpin_free_ino(root);
+ mutex_unlock(&root->fs_commit_mutex);
+
btrfs_set_root_node(&root->root_item,
root->node);
}
@@ -809,97 +793,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
return ret;
}
-#if 0
-/*
- * when dropping snapshots, we generate a ton of delayed refs, and it makes
- * sense not to join the transaction while it is trying to flush the current
- * queue of delayed refs out.
- *
- * This is used by the drop snapshot code only
- */
-static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
-{
- DEFINE_WAIT(wait);
-
- mutex_lock(&info->trans_mutex);
- while (info->running_transaction &&
- info->running_transaction->delayed_refs.flushing) {
- prepare_to_wait(&info->transaction_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- mutex_unlock(&info->trans_mutex);
-
- schedule();
-
- mutex_lock(&info->trans_mutex);
- finish_wait(&info->transaction_wait, &wait);
- }
- mutex_unlock(&info->trans_mutex);
- return 0;
-}
-
-/*
- * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
- * all of them
- */
-int btrfs_drop_dead_root(struct btrfs_root *root)
-{
- struct btrfs_trans_handle *trans;
- struct btrfs_root *tree_root = root->fs_info->tree_root;
- unsigned long nr;
- int ret;
-
- while (1) {
- /*
- * we don't want to jump in and create a bunch of
- * delayed refs if the transaction is starting to close
- */
- wait_transaction_pre_flush(tree_root->fs_info);
- trans = btrfs_start_transaction(tree_root, 1);
-
- /*
- * we've joined a transaction, make sure it isn't
- * closing right now
- */
- if (trans->transaction->delayed_refs.flushing) {
- btrfs_end_transaction(trans, tree_root);
- continue;
- }
-
- ret = btrfs_drop_snapshot(trans, root);
- if (ret != -EAGAIN)
- break;
-
- ret = btrfs_update_root(trans, tree_root,
- &root->root_key,
- &root->root_item);
- if (ret)
- break;
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, tree_root);
- BUG_ON(ret);
-
- btrfs_btree_balance_dirty(tree_root, nr);
- cond_resched();
- }
- BUG_ON(ret);
-
- ret = btrfs_del_root(trans, tree_root, &root->root_key);
- BUG_ON(ret);
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, tree_root);
- BUG_ON(ret);
-
- free_extent_buffer(root->node);
- free_extent_buffer(root->commit_root);
- kfree(root);
-
- btrfs_btree_balance_dirty(tree_root, nr);
- return ret;
-}
-#endif
-
/*
* new snapshots need to be created at a very specific time in the
* transaction commit. This does the actual creation
@@ -930,7 +823,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto fail;
}
- ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
+ ret = btrfs_find_free_objectid(tree_root, &objectid);
if (ret) {
pending->error = ret;
goto fail;
@@ -967,7 +860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
BUG_ON(ret);
ret = btrfs_insert_dir_item(trans, parent_root,
dentry->d_name.name, dentry->d_name.len,
- parent_inode->i_ino, &key,
+ parent_inode, &key,
BTRFS_FT_DIR, index);
BUG_ON(ret);
@@ -1009,7 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
*/
ret = btrfs_add_root_ref(trans, tree_root, objectid,
parent_root->root_key.objectid,
- parent_inode->i_ino, index,
+ btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
BUG_ON(ret);
dput(parent);
@@ -1037,6 +930,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
int ret;
list_for_each_entry(pending, head, list) {
+ /*
+ * We must deal with the delayed items before creating
+ * snapshots, or we will create a snapshot with inconsistent
+ * information.
+ */
+ ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
+ BUG_ON(ret);
+
ret = create_pending_snapshot(trans, fs_info, pending);
BUG_ON(ret);
}
@@ -1290,6 +1191,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
BUG_ON(ret);
}
+ ret = btrfs_run_delayed_items(trans, root);
+ BUG_ON(ret);
+
/*
* rename don't use btrfs_join_transaction, so, once we
* set the transaction to blocked above, we aren't going
@@ -1316,11 +1220,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
ret = create_pending_snapshots(trans, root->fs_info);
BUG_ON(ret);
+ ret = btrfs_run_delayed_items(trans, root);
+ BUG_ON(ret);
+
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
BUG_ON(ret);
WARN_ON(cur_trans != trans->transaction);
+ btrfs_scrub_pause(root);
/* btrfs_commit_tree_roots is responsible for getting the
* various roots consistent with each other. Every pointer
* in the tree of tree roots has to point to the most up to date
@@ -1405,6 +1313,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_unlock(&root->fs_info->trans_mutex);
+ btrfs_scrub_continue(root);
+
if (current->journal_info == trans)
current->journal_info = NULL;
@@ -1432,6 +1342,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
root = list_entry(list.next, struct btrfs_root, root_list);
list_del(&root->root_list);
+ btrfs_kill_all_delayed_nodes(root);
+
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
btrfs_drop_snapshot(root, NULL, 0);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index e441acc6c584..804c88639e5d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
int btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_drop_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
@@ -115,6 +112,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
int wait_for_unblock);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_throttle(struct btrfs_root *root);
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 992ab425599d..3b580ee8ab1d 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
ret = 0;
goto out;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (wret < 0) {
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f997ec0c1ba4..592396c6dc47 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -333,13 +333,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
goto insert;
if (item_size == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
dst_copy = kmalloc(item_size, GFP_NOFS);
src_copy = kmalloc(item_size, GFP_NOFS);
if (!dst_copy || !src_copy) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
kfree(dst_copy);
kfree(src_copy);
return -ENOMEM;
@@ -361,13 +361,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
* sync
*/
if (ret == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
}
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* try to insert the key into the destination tree */
ret = btrfs_insert_empty_item(trans, root, path,
key, item_size);
@@ -382,7 +382,6 @@ insert:
} else if (found_size < item_size) {
ret = btrfs_extend_item(trans, root, path,
item_size - found_size);
- BUG_ON(ret);
}
} else if (ret) {
return ret;
@@ -438,7 +437,7 @@ insert:
}
no_copy:
btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
@@ -519,7 +518,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* file. This must be done before the btrfs_drop_extents run
* so we don't try to drop this extent.
*/
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
start, 0);
if (ret == 0 &&
@@ -544,11 +543,11 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* we don't have to do anything
*/
if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto out;
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
@@ -590,6 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset);
+ BUG_ON(ret);
} else {
/*
* insert the extent pointer in the extent
@@ -600,7 +600,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
key->objectid, offset, &ins);
BUG_ON(ret);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (btrfs_file_extent_compression(eb, item)) {
csum_start = ins.objectid;
@@ -614,7 +614,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_csums_range(root->log_root,
csum_start, csum_end - 1,
- &ordered_sums);
+ &ordered_sums, 0);
BUG_ON(ret);
while (!list_empty(&ordered_sums)) {
struct btrfs_ordered_sum *sums;
@@ -629,7 +629,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
kfree(sums);
}
} else {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
/* inline extents are easy, we just overwrite them */
@@ -675,10 +675,13 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
return -ENOMEM;
read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode = read_one_inode(root, location.objectid);
- BUG_ON(!inode);
+ if (!inode) {
+ kfree(name);
+ return -EIO;
+ }
ret = link_to_fixup_dir(trans, root, path, location.objectid);
BUG_ON(ret);
@@ -713,7 +716,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
goto out;
} else
goto out;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
if (di && !IS_ERR(di)) {
@@ -724,7 +727,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
goto out;
match = 1;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return match;
}
@@ -817,7 +820,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
return -ENOENT;
inode = read_one_inode(root, key->objectid);
- BUG_ON(!inode);
+ if (!inode) {
+ iput(dir);
+ return -EIO;
+ }
ref_ptr = btrfs_item_ptr_offset(eb, slot);
ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
@@ -832,7 +838,7 @@ again:
read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
/* if we already have a perfect match, we're done */
- if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
+ if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
btrfs_inode_ref_index(eb, ref),
name, namelen)) {
goto out;
@@ -884,7 +890,7 @@ again:
if (!backref_in_log(log, key, victim_name,
victim_name_len)) {
btrfs_inc_nlink(inode);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
@@ -901,7 +907,7 @@ again:
*/
search_done = 1;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
insert:
/* insert our name */
@@ -922,7 +928,7 @@ out:
BUG_ON(ret);
out_nowrite:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
iput(dir);
iput(inode);
return 0;
@@ -960,8 +966,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
unsigned long ptr;
unsigned long ptr_end;
int name_len;
+ u64 ino = btrfs_ino(inode);
- key.objectid = inode->i_ino;
+ key.objectid = ino;
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
@@ -980,7 +987,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
}
btrfs_item_key_to_cpu(path->nodes[0], &key,
path->slots[0]);
- if (key.objectid != inode->i_ino ||
+ if (key.objectid != ino ||
key.type != BTRFS_INODE_REF_KEY)
break;
ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
@@ -999,9 +1006,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (key.offset == 0)
break;
key.offset--;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (nlink != inode->i_nlink) {
inode->i_nlink = nlink;
btrfs_update_inode(trans, root, inode);
@@ -1011,10 +1018,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (inode->i_nlink == 0) {
if (S_ISDIR(inode->i_mode)) {
ret = replay_dir_deletes(trans, root, NULL, path,
- inode->i_ino, 1);
+ ino, 1);
BUG_ON(ret);
}
- ret = insert_orphan_item(trans, root, inode->i_ino);
+ ret = insert_orphan_item(trans, root, ino);
BUG_ON(ret);
}
btrfs_free_path(path);
@@ -1050,11 +1057,13 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
break;
ret = btrfs_del_item(trans, root, path);
- BUG_ON(ret);
+ if (ret)
+ goto out;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode = read_one_inode(root, key.offset);
- BUG_ON(!inode);
+ if (!inode)
+ return -EIO;
ret = fixup_inode_link_count(trans, root, inode);
BUG_ON(ret);
@@ -1068,8 +1077,10 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
*/
key.offset = (u64)-1;
}
- btrfs_release_path(root, path);
- return 0;
+ ret = 0;
+out:
+ btrfs_release_path(path);
+ return ret;
}
@@ -1088,7 +1099,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
struct inode *inode;
inode = read_one_inode(root, objectid);
- BUG_ON(!inode);
+ if (!inode)
+ return -EIO;
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
@@ -1096,7 +1108,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret == 0) {
btrfs_inc_nlink(inode);
btrfs_update_inode(trans, root, inode);
@@ -1175,7 +1187,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
int ret;
dir = read_one_inode(root, key->objectid);
- BUG_ON(!dir);
+ if (!dir)
+ return -EIO;
name_len = btrfs_dir_name_len(eb, di);
name = kmalloc(name_len, GFP_NOFS);
@@ -1192,7 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
exists = 1;
else
exists = 0;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (key->type == BTRFS_DIR_ITEM_KEY) {
dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1205,7 +1218,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
} else {
BUG();
}
- if (!dst_di || IS_ERR(dst_di)) {
+ if (IS_ERR_OR_NULL(dst_di)) {
/* we need a sequence number to insert, so we only
* do inserts for the BTRFS_DIR_INDEX_KEY types
*/
@@ -1236,13 +1249,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
if (key->type == BTRFS_DIR_INDEX_KEY)
goto insert;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
kfree(name);
iput(dir);
return 0;
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = insert_one_name(trans, root, path, key->objectid, key->offset,
name, name_len, log_type, &log_key);
@@ -1363,7 +1376,7 @@ next:
*end_ret = found_end;
ret = 0;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
@@ -1426,12 +1439,15 @@ again:
dir_key->offset,
name, name_len, 0);
}
- if (!log_di || IS_ERR(log_di)) {
+ if (IS_ERR_OR_NULL(log_di)) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
- btrfs_release_path(root, path);
- btrfs_release_path(log, log_path);
+ btrfs_release_path(path);
+ btrfs_release_path(log_path);
inode = read_one_inode(root, location.objectid);
- BUG_ON(!inode);
+ if (!inode) {
+ kfree(name);
+ return -EIO;
+ }
ret = link_to_fixup_dir(trans, root,
path, location.objectid);
@@ -1453,7 +1469,7 @@ again:
ret = 0;
goto out;
}
- btrfs_release_path(log, log_path);
+ btrfs_release_path(log_path);
kfree(name);
ptr = (unsigned long)(di + 1);
@@ -1461,8 +1477,8 @@ again:
}
ret = 0;
out:
- btrfs_release_path(root, path);
- btrfs_release_path(log, log_path);
+ btrfs_release_path(path);
+ btrfs_release_path(log_path);
return ret;
}
@@ -1550,7 +1566,7 @@ again:
break;
dir_key.offset = found_key.offset + 1;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (range_end == (u64)-1)
break;
range_start = range_end + 1;
@@ -1561,11 +1577,11 @@ next_type:
if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
key_type = BTRFS_DIR_LOG_INDEX_KEY;
dir_key.type = BTRFS_DIR_INDEX_KEY;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
btrfs_free_path(log_path);
iput(dir);
return ret;
@@ -2093,7 +2109,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* the running transaction open, so a full commit can't hop
* in and cause problems either.
*/
+ btrfs_scrub_pause_super(root);
write_ctree_super(trans, root->fs_info->tree_root, 1);
+ btrfs_scrub_continue_super(root);
ret = 0;
mutex_lock(&root->log_mutex);
@@ -2197,6 +2215,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
int ret;
int err = 0;
int bytes_del = 0;
+ u64 dir_ino = btrfs_ino(dir);
if (BTRFS_I(dir)->logged_trans < trans->transid)
return 0;
@@ -2214,7 +2233,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
goto out_unlock;
}
- di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
name, name_len, -1);
if (IS_ERR(di)) {
err = PTR_ERR(di);
@@ -2225,8 +2244,8 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
bytes_del += name_len;
BUG_ON(ret);
}
- btrfs_release_path(log, path);
- di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
+ btrfs_release_path(path);
+ di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
index, name, name_len, -1);
if (IS_ERR(di)) {
err = PTR_ERR(di);
@@ -2244,10 +2263,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
if (bytes_del) {
struct btrfs_key key;
- key.objectid = dir->i_ino;
+ key.objectid = dir_ino;
key.offset = 0;
key.type = BTRFS_INODE_ITEM_KEY;
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
if (ret < 0) {
@@ -2269,7 +2288,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(path->nodes[0]);
} else
ret = 0;
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
}
fail:
btrfs_free_path(path);
@@ -2303,7 +2322,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
log = root->log_root;
mutex_lock(&BTRFS_I(inode)->log_mutex);
- ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
+ ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
dirid, &index);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
if (ret == -ENOSPC) {
@@ -2344,7 +2363,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
struct btrfs_dir_log_item);
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
return 0;
}
@@ -2369,13 +2388,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
int nritems;
u64 first_offset = min_offset;
u64 last_offset = (u64)-1;
+ u64 ino = btrfs_ino(inode);
log = root->log_root;
- max_key.objectid = inode->i_ino;
+ max_key.objectid = ino;
max_key.offset = (u64)-1;
max_key.type = key_type;
- min_key.objectid = inode->i_ino;
+ min_key.objectid = ino;
min_key.type = key_type;
min_key.offset = min_offset;
@@ -2388,18 +2408,17 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
* we didn't find anything from this transaction, see if there
* is anything at all
*/
- if (ret != 0 || min_key.objectid != inode->i_ino ||
- min_key.type != key_type) {
- min_key.objectid = inode->i_ino;
+ if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
+ min_key.objectid = ino;
min_key.type = key_type;
min_key.offset = (u64)-1;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
- ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+ ret = btrfs_previous_item(root, path, ino, key_type);
/* if ret == 0 there are items for this type,
* create a range to tell us the last key of this type.
@@ -2417,7 +2436,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
}
/* go backward to find any previous key */
- ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+ ret = btrfs_previous_item(root, path, ino, key_type);
if (ret == 0) {
struct btrfs_key tmp;
btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -2432,7 +2451,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
}
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* find the first key from this transaction again */
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
@@ -2452,8 +2471,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
for (i = path->slots[0]; i < nritems; i++) {
btrfs_item_key_to_cpu(src, &min_key, i);
- if (min_key.objectid != inode->i_ino ||
- min_key.type != key_type)
+ if (min_key.objectid != ino || min_key.type != key_type)
goto done;
ret = overwrite_item(trans, log, dst_path, src, i,
&min_key);
@@ -2474,7 +2492,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
goto done;
}
btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
- if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
+ if (tmp.objectid != ino || tmp.type != key_type) {
last_offset = (u64)-1;
goto done;
}
@@ -2490,8 +2508,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
}
}
done:
- btrfs_release_path(root, path);
- btrfs_release_path(log, dst_path);
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
if (err == 0) {
*last_offset_ret = last_offset;
@@ -2500,8 +2518,7 @@ done:
* is valid
*/
ret = insert_dir_log_key(trans, log, path, key_type,
- inode->i_ino, first_offset,
- last_offset);
+ ino, first_offset, last_offset);
if (ret)
err = ret;
}
@@ -2587,10 +2604,11 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
break;
ret = btrfs_del_item(trans, log, path);
- BUG_ON(ret);
- btrfs_release_path(log, path);
+ if (ret)
+ break;
+ btrfs_release_path(path);
}
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
return ret;
}
@@ -2665,6 +2683,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
extent = btrfs_item_ptr(src, start_slot + i,
struct btrfs_file_extent_item);
+ if (btrfs_file_extent_generation(src, extent) < trans->transid)
+ continue;
+
found_type = btrfs_file_extent_type(src, extent);
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
@@ -2689,14 +2710,14 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_csums_range(
log->fs_info->csum_root,
ds + cs, ds + cs + cl - 1,
- &ordered_sums);
+ &ordered_sums, 0);
BUG_ON(ret);
}
}
}
btrfs_mark_buffer_dirty(dst_path->nodes[0]);
- btrfs_release_path(log, dst_path);
+ btrfs_release_path(dst_path);
kfree(ins_data);
/*
@@ -2745,6 +2766,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
int nritems;
int ins_start_slot = 0;
int ins_nr;
+ u64 ino = btrfs_ino(inode);
log = root->log_root;
@@ -2757,11 +2779,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
return -ENOMEM;
}
- min_key.objectid = inode->i_ino;
+ min_key.objectid = ino;
min_key.type = BTRFS_INODE_ITEM_KEY;
min_key.offset = 0;
- max_key.objectid = inode->i_ino;
+ max_key.objectid = ino;
/* today the code can only do partial logging of directories */
if (!S_ISDIR(inode->i_mode))
@@ -2773,6 +2795,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
max_key.type = (u8)-1;
max_key.offset = (u64)-1;
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+ if (ret) {
+ btrfs_free_path(path);
+ btrfs_free_path(dst_path);
+ return ret;
+ }
+
mutex_lock(&BTRFS_I(inode)->log_mutex);
/*
@@ -2784,8 +2813,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
if (inode_only == LOG_INODE_EXISTS)
max_key_type = BTRFS_XATTR_ITEM_KEY;
- ret = drop_objectid_items(trans, log, path,
- inode->i_ino, max_key_type);
+ ret = drop_objectid_items(trans, log, path, ino, max_key_type);
} else {
ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
}
@@ -2803,7 +2831,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
break;
again:
/* note, ins_nr might be > 0 here, cleanup outside the loop */
- if (min_key.objectid != inode->i_ino)
+ if (min_key.objectid != ino)
break;
if (min_key.type > max_key.type)
break;
@@ -2845,7 +2873,7 @@ next_slot:
}
ins_nr = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (min_key.offset < (u64)-1)
min_key.offset++;
@@ -2868,8 +2896,8 @@ next_slot:
}
WARN_ON(ins_nr);
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
- btrfs_release_path(root, path);
- btrfs_release_path(log, dst_path);
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
ret = log_directory_changes(trans, root, inode, path, dst_path);
if (ret) {
err = ret;
@@ -3136,7 +3164,7 @@ again:
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
- btrfs_release_path(log_root_tree, path);
+ btrfs_release_path(path);
if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
break;
@@ -3171,7 +3199,7 @@ again:
if (found_key.offset == 0)
break;
}
- btrfs_release_path(log_root_tree, path);
+ btrfs_release_path(path);
/* step one is to pin it all, step two is to replay just inodes */
if (wc.pin) {
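Two conversions recur throughout the tree-log.c hunks above: btrfs_release_path() now takes only the path (the root argument is gone), and lookups that fail during log replay return -EIO after releasing whatever they already hold, instead of crashing with BUG_ON(). Below is a minimal userspace sketch of that second pattern only; every identifier in it (demo_inode, demo_read_inode, demo_put, demo_replay_ref) is a hypothetical stand-in, not a btrfs symbol.

/*
 * Sketch of the "return -EIO instead of BUG_ON()" replay pattern.
 * All identifiers here are illustrative only.
 */
#include <errno.h>
#include <stdlib.h>

struct demo_inode { unsigned long long objectid; };

/* stand-in for read_one_inode(); may legitimately fail and return NULL */
static struct demo_inode *demo_read_inode(unsigned long long objectid)
{
	struct demo_inode *inode = malloc(sizeof(*inode));

	if (inode)
		inode->objectid = objectid;
	return inode;
}

/* stand-in for iput() */
static void demo_put(struct demo_inode *inode)
{
	free(inode);
}

static int demo_replay_ref(unsigned long long dir_id, unsigned long long ino_id)
{
	struct demo_inode *dir, *inode;

	dir = demo_read_inode(dir_id);
	if (!dir)
		return -ENOENT;

	inode = demo_read_inode(ino_id);
	if (!inode) {
		/* drop what we already hold, then report the error upward */
		demo_put(dir);
		return -EIO;
	}

	/* ... actual replay work would happen here ... */

	demo_put(inode);
	demo_put(dir);
	return 0;
}

int main(void)
{
	return demo_replay_ref(256, 257) ? 1 : 0;
}

The point is simply that a missing inode during log replay is treated as a recoverable I/O error for the caller to handle rather than a kernel bug.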
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 3dfae84c8cc8..2270ac58d746 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
struct inode *inode, u64 dirid);
-int btrfs_join_running_log_trans(struct btrfs_root *root);
int btrfs_end_log_trans(struct btrfs_root *root);
int btrfs_pin_log_trans(struct btrfs_root *root);
int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh
deleted file mode 100644
index 1ca1952fd917..000000000000
--- a/fs/btrfs/version.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-#
-# determine-version -- report a useful version for releases
-#
-# Copyright 2008, Aron Griffis <agriffis@n01se.net>
-# Copyright 2008, Oracle
-# Released under the GNU GPLv2
-
-v="v0.16"
-
-which git &> /dev/null
-if [ $? == 0 ]; then
- git branch >& /dev/null
- if [ $? == 0 ]; then
- if head=`git rev-parse --verify HEAD 2>/dev/null`; then
- if tag=`git describe --tags 2>/dev/null`; then
- v="$tag"
- fi
-
- # Are there uncommitted changes?
- git update-index --refresh --unmerged > /dev/null
- if git diff-index --name-only HEAD | \
- grep -v "^scripts/package" \
- | read dummy; then
- v="$v"-dirty
- fi
- fi
- fi
-fi
-
-echo "#ifndef __BUILD_VERSION" > .build-version.h
-echo "#define __BUILD_VERSION" >> .build-version.h
-echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h
-echo "#endif" >> .build-version.h
-
-diff -q version.h .build-version.h >& /dev/null
-
-if [ $? == 0 ]; then
- rm .build-version.h
- exit 0
-fi
-
-mv .build-version.h version.h
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c7367ae5a3e6..c48214ef5c09 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -38,22 +38,9 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
-#define map_lookup_size(n) (sizeof(struct map_lookup) + \
- (sizeof(struct btrfs_bio_stripe) * (n)))
-
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
-void btrfs_lock_volumes(void)
-{
- mutex_lock(&uuid_mutex);
-}
-
-void btrfs_unlock_volumes(void)
-{
- mutex_unlock(&uuid_mutex);
-}
-
static void lock_chunks(struct btrfs_root *root)
{
mutex_lock(&root->fs_info->chunk_mutex);
@@ -363,7 +350,7 @@ static noinline int device_list_add(const char *path,
INIT_LIST_HEAD(&device->dev_alloc_list);
mutex_lock(&fs_devices->device_list_mutex);
- list_add(&device->dev_list, &fs_devices->devices);
+ list_add_rcu(&device->dev_list, &fs_devices->devices);
mutex_unlock(&fs_devices->device_list_mutex);
device->fs_devices = fs_devices;
@@ -406,7 +393,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
fs_devices->latest_trans = orig->latest_trans;
memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
- mutex_lock(&orig->device_list_mutex);
+ /* We hold the volume lock, so it is safe to walk the devices. */
list_for_each_entry(orig_dev, &orig->devices, dev_list) {
device = kzalloc(sizeof(*device), GFP_NOFS);
if (!device)
@@ -429,10 +416,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
device->fs_devices = fs_devices;
fs_devices->num_devices++;
}
- mutex_unlock(&orig->device_list_mutex);
return fs_devices;
error:
- mutex_unlock(&orig->device_list_mutex);
free_fs_devices(fs_devices);
return ERR_PTR(-ENOMEM);
}
@@ -443,7 +428,7 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
mutex_lock(&uuid_mutex);
again:
- mutex_lock(&fs_devices->device_list_mutex);
+ /* This is the initialized path, so it is safe to release the devices. */
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
if (device->in_fs_metadata)
continue;
@@ -463,7 +448,6 @@ again:
kfree(device->name);
kfree(device);
}
- mutex_unlock(&fs_devices->device_list_mutex);
if (fs_devices->seed) {
fs_devices = fs_devices->seed;
@@ -474,6 +458,29 @@ again:
return 0;
}
+static void __free_device(struct work_struct *work)
+{
+ struct btrfs_device *device;
+
+ device = container_of(work, struct btrfs_device, rcu_work);
+
+ if (device->bdev)
+ blkdev_put(device->bdev, device->mode);
+
+ kfree(device->name);
+ kfree(device);
+}
+
+static void free_device(struct rcu_head *head)
+{
+ struct btrfs_device *device;
+
+ device = container_of(head, struct btrfs_device, rcu);
+
+ INIT_WORK(&device->rcu_work, __free_device);
+ schedule_work(&device->rcu_work);
+}
+
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
@@ -481,20 +488,32 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
if (--fs_devices->opened > 0)
return 0;
+ mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
- if (device->bdev) {
- blkdev_put(device->bdev, device->mode);
+ struct btrfs_device *new_device;
+
+ if (device->bdev)
fs_devices->open_devices--;
- }
+
if (device->writeable) {
list_del_init(&device->dev_alloc_list);
fs_devices->rw_devices--;
}
- device->bdev = NULL;
- device->writeable = 0;
- device->in_fs_metadata = 0;
+ new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
+ BUG_ON(!new_device);
+ memcpy(new_device, device, sizeof(*new_device));
+ new_device->name = kstrdup(device->name, GFP_NOFS);
+ BUG_ON(!new_device->name);
+ new_device->bdev = NULL;
+ new_device->writeable = 0;
+ new_device->in_fs_metadata = 0;
+ list_replace_rcu(&device->dev_list, &new_device->dev_list);
+
+ call_rcu(&device->rcu, free_device);
}
+ mutex_unlock(&fs_devices->device_list_mutex);
+
WARN_ON(fs_devices->open_devices);
WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
@@ -597,6 +616,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
list_add(&device->dev_alloc_list,
&fs_devices->alloc_list);
}
+ brelse(bh);
continue;
error_brelse:
@@ -815,10 +835,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
/* we don't want to overwrite the superblock on the drive,
* so we make sure to start at an offset of at least 1MB
*/
- search_start = 1024 * 1024;
-
- if (root->fs_info->alloc_start + num_bytes <= search_end)
- search_start = max(root->fs_info->alloc_start, search_start);
+ search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
max_hole_start = search_start;
max_hole_size = 0;
@@ -949,14 +966,14 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
if (ret > 0) {
ret = btrfs_previous_item(root, path, key.objectid,
BTRFS_DEV_EXTENT_KEY);
- BUG_ON(ret);
+ if (ret)
+ goto out;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
BUG_ON(found_key.offset > start || found_key.offset +
btrfs_dev_extent_length(leaf, extent) < start);
- ret = 0;
} else if (ret == 0) {
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -967,8 +984,8 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
if (device->bytes_used > 0)
device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
ret = btrfs_del_item(trans, root, path);
- BUG_ON(ret);
+out:
btrfs_free_path(path);
return ret;
}
@@ -1203,11 +1220,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
struct block_device *bdev;
struct buffer_head *bh = NULL;
struct btrfs_super_block *disk_super;
+ struct btrfs_fs_devices *cur_devices;
u64 all_avail;
u64 devid;
u64 num_devices;
u8 *dev_uuid;
int ret = 0;
+ bool clear_super = false;
mutex_lock(&uuid_mutex);
mutex_lock(&root->fs_info->volume_mutex);
@@ -1238,14 +1257,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
device = NULL;
devices = &root->fs_info->fs_devices->devices;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ /*
+ * It is safe to read the devices since the volume_mutex
+ * is held.
+ */
list_for_each_entry(tmp, devices, dev_list) {
if (tmp->in_fs_metadata && !tmp->bdev) {
device = tmp;
break;
}
}
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
bdev = NULL;
bh = NULL;
disk_super = NULL;
@@ -1287,8 +1308,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
}
if (device->writeable) {
+ lock_chunks(root);
list_del_init(&device->dev_alloc_list);
+ unlock_chunks(root);
root->fs_info->fs_devices->rw_devices--;
+ clear_super = true;
}
ret = btrfs_shrink_device(device, 0);
@@ -1300,15 +1324,17 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
goto error_undo;
device->in_fs_metadata = 0;
+ btrfs_scrub_cancel_dev(root, device);
/*
* the device list mutex makes sure that we don't change
* the device list while someone else is writing out all
* the device supers.
*/
+
+ cur_devices = device->fs_devices;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- list_del_init(&device->dev_list);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ list_del_rcu(&device->dev_list);
device->fs_devices->num_devices--;
@@ -1322,34 +1348,36 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
if (device->bdev == root->fs_info->fs_devices->latest_bdev)
root->fs_info->fs_devices->latest_bdev = next_device->bdev;
- if (device->bdev) {
- blkdev_put(device->bdev, device->mode);
- device->bdev = NULL;
+ if (device->bdev)
device->fs_devices->open_devices--;
- }
+
+ call_rcu(&device->rcu, free_device);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
- if (device->fs_devices->open_devices == 0) {
+ if (cur_devices->open_devices == 0) {
struct btrfs_fs_devices *fs_devices;
fs_devices = root->fs_info->fs_devices;
while (fs_devices) {
- if (fs_devices->seed == device->fs_devices)
+ if (fs_devices->seed == cur_devices)
break;
fs_devices = fs_devices->seed;
}
- fs_devices->seed = device->fs_devices->seed;
- device->fs_devices->seed = NULL;
- __btrfs_close_devices(device->fs_devices);
- free_fs_devices(device->fs_devices);
+ fs_devices->seed = cur_devices->seed;
+ cur_devices->seed = NULL;
+ lock_chunks(root);
+ __btrfs_close_devices(cur_devices);
+ unlock_chunks(root);
+ free_fs_devices(cur_devices);
}
/*
* at this point, the device is zero sized. We want to
* remove it from the devices list and zero out the old super
*/
- if (device->writeable) {
+ if (clear_super) {
/* make sure this device isn't detected as part of
* the FS anymore
*/
@@ -1358,8 +1386,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
sync_dirty_buffer(bh);
}
- kfree(device->name);
- kfree(device);
ret = 0;
error_brelse:
@@ -1373,8 +1399,10 @@ out:
return ret;
error_undo:
if (device->writeable) {
+ lock_chunks(root);
list_add(&device->dev_alloc_list,
&root->fs_info->fs_devices->alloc_list);
+ unlock_chunks(root);
root->fs_info->fs_devices->rw_devices++;
}
goto error_brelse;
@@ -1414,7 +1442,12 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&seed_devices->devices);
INIT_LIST_HEAD(&seed_devices->alloc_list);
mutex_init(&seed_devices->device_list_mutex);
- list_splice_init(&fs_devices->devices, &seed_devices->devices);
+
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
+ synchronize_rcu);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
list_for_each_entry(device, &seed_devices->devices, dev_list) {
device->fs_devices = seed_devices;
@@ -1475,7 +1508,7 @@ next_slot:
goto error;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
continue;
}
@@ -1611,7 +1644,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
* half setup
*/
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
+ list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
&root->fs_info->fs_devices->alloc_list);
root->fs_info->fs_devices->num_devices++;
@@ -1769,10 +1802,9 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
BUG_ON(ret);
ret = btrfs_del_item(trans, root, path);
- BUG_ON(ret);
btrfs_free_path(path);
- return 0;
+ return ret;
}
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
@@ -1947,7 +1979,7 @@ again:
chunk = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_chunk);
chunk_type = btrfs_chunk_type(leaf, chunk);
- btrfs_release_path(chunk_root, path);
+ btrfs_release_path(path);
if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
@@ -2065,7 +2097,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
if (found_key.offset == 0)
break;
- btrfs_release_path(chunk_root, path);
+ btrfs_release_path(path);
ret = btrfs_relocate_chunk(chunk_root,
chunk_root->root_key.objectid,
found_key.objectid,
@@ -2137,7 +2169,7 @@ again:
goto done;
if (ret) {
ret = 0;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
break;
}
@@ -2146,7 +2178,7 @@ again:
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
if (key.objectid != device->devid) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
break;
}
@@ -2154,14 +2186,14 @@ again:
length = btrfs_dev_extent_length(l, dev_extent);
if (key.offset + length <= new_size) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
break;
}
chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
chunk_offset);
@@ -2237,275 +2269,204 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
return 0;
}
-static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
- int num_stripes, int sub_stripes)
+/*
+ * sort the devices in descending order by max_avail, total_avail
+ */
+static int btrfs_cmp_device_info(const void *a, const void *b)
{
- if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
- return calc_size;
- else if (type & BTRFS_BLOCK_GROUP_RAID10)
- return calc_size * (num_stripes / sub_stripes);
- else
- return calc_size * num_stripes;
-}
+ const struct btrfs_device_info *di_a = a;
+ const struct btrfs_device_info *di_b = b;
-/* Used to sort the devices by max_avail(descending sort) */
-int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
-{
- if (((struct btrfs_device_info *)dev_info1)->max_avail >
- ((struct btrfs_device_info *)dev_info2)->max_avail)
+ if (di_a->max_avail > di_b->max_avail)
return -1;
- else if (((struct btrfs_device_info *)dev_info1)->max_avail <
- ((struct btrfs_device_info *)dev_info2)->max_avail)
+ if (di_a->max_avail < di_b->max_avail)
return 1;
- else
- return 0;
+ if (di_a->total_avail > di_b->total_avail)
+ return -1;
+ if (di_a->total_avail < di_b->total_avail)
+ return 1;
+ return 0;
}
-static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type,
- int *num_stripes, int *min_stripes,
- int *sub_stripes)
+static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root,
+ struct map_lookup **map_ret,
+ u64 *num_bytes_out, u64 *stripe_size_out,
+ u64 start, u64 type)
{
- *num_stripes = 1;
- *min_stripes = 1;
- *sub_stripes = 0;
+ struct btrfs_fs_info *info = extent_root->fs_info;
+ struct btrfs_fs_devices *fs_devices = info->fs_devices;
+ struct list_head *cur;
+ struct map_lookup *map = NULL;
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+ struct btrfs_device_info *devices_info = NULL;
+ u64 total_avail;
+ int num_stripes; /* total number of stripes to allocate */
+ int sub_stripes; /* sub_stripes info for map */
+ int dev_stripes; /* stripes per dev */
+ int devs_max; /* max devs to use */
+ int devs_min; /* min devs needed */
+ int devs_increment; /* ndevs has to be a multiple of this */
+ int ncopies; /* how many copies the data has */
+ int ret;
+ u64 max_stripe_size;
+ u64 max_chunk_size;
+ u64 stripe_size;
+ u64 num_bytes;
+ int ndevs;
+ int i;
+ int j;
- if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
- *num_stripes = fs_devices->rw_devices;
- *min_stripes = 2;
- }
- if (type & (BTRFS_BLOCK_GROUP_DUP)) {
- *num_stripes = 2;
- *min_stripes = 2;
- }
- if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
- if (fs_devices->rw_devices < 2)
- return -ENOSPC;
- *num_stripes = 2;
- *min_stripes = 2;
- }
- if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
- *num_stripes = fs_devices->rw_devices;
- if (*num_stripes < 4)
- return -ENOSPC;
- *num_stripes &= ~(u32)1;
- *sub_stripes = 2;
- *min_stripes = 4;
+ if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
+ (type & BTRFS_BLOCK_GROUP_DUP)) {
+ WARN_ON(1);
+ type &= ~BTRFS_BLOCK_GROUP_DUP;
}
- return 0;
-}
+ if (list_empty(&fs_devices->alloc_list))
+ return -ENOSPC;
-static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices,
- u64 proposed_size, u64 type,
- int num_stripes, int small_stripe)
-{
- int min_stripe_size = 1 * 1024 * 1024;
- u64 calc_size = proposed_size;
- u64 max_chunk_size = calc_size;
- int ncopies = 1;
+ sub_stripes = 1;
+ dev_stripes = 1;
+ devs_increment = 1;
+ ncopies = 1;
+ devs_max = 0; /* 0 == as many as possible */
+ devs_min = 1;
- if (type & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID10))
+ /*
+ * define the properties of each RAID type.
+ * FIXME: move this to a global table and use it in all RAID
+ * calculation code
+ */
+ if (type & (BTRFS_BLOCK_GROUP_DUP)) {
+ dev_stripes = 2;
+ ncopies = 2;
+ devs_max = 1;
+ } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
+ devs_min = 2;
+ } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
+ devs_increment = 2;
ncopies = 2;
+ devs_max = 2;
+ devs_min = 2;
+ } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
+ sub_stripes = 2;
+ devs_increment = 2;
+ ncopies = 2;
+ devs_min = 4;
+ } else {
+ devs_max = 1;
+ }
if (type & BTRFS_BLOCK_GROUP_DATA) {
- max_chunk_size = 10 * calc_size;
- min_stripe_size = 64 * 1024 * 1024;
+ max_stripe_size = 1024 * 1024 * 1024;
+ max_chunk_size = 10 * max_stripe_size;
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
- max_chunk_size = 256 * 1024 * 1024;
- min_stripe_size = 32 * 1024 * 1024;
+ max_stripe_size = 256 * 1024 * 1024;
+ max_chunk_size = max_stripe_size;
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
- calc_size = 8 * 1024 * 1024;
- max_chunk_size = calc_size * 2;
- min_stripe_size = 1 * 1024 * 1024;
+ max_stripe_size = 8 * 1024 * 1024;
+ max_chunk_size = 2 * max_stripe_size;
+ } else {
+ printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
+ type);
+ BUG_ON(1);
}
/* we don't want a chunk larger than 10% of writeable space */
max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
max_chunk_size);
- if (calc_size * num_stripes > max_chunk_size * ncopies) {
- calc_size = max_chunk_size * ncopies;
- do_div(calc_size, num_stripes);
- do_div(calc_size, BTRFS_STRIPE_LEN);
- calc_size *= BTRFS_STRIPE_LEN;
- }
+ devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
+ GFP_NOFS);
+ if (!devices_info)
+ return -ENOMEM;
- /* we don't want tiny stripes */
- if (!small_stripe)
- calc_size = max_t(u64, min_stripe_size, calc_size);
+ cur = fs_devices->alloc_list.next;
/*
- * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure
- * we end up with something bigger than a stripe
+ * in the first pass through the devices list, we gather information
+ * about the available holes on each device.
*/
- calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN);
-
- do_div(calc_size, BTRFS_STRIPE_LEN);
- calc_size *= BTRFS_STRIPE_LEN;
-
- return calc_size;
-}
-
-static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map,
- int num_stripes)
-{
- struct map_lookup *new;
- size_t len = map_lookup_size(num_stripes);
-
- BUG_ON(map->num_stripes < num_stripes);
-
- if (map->num_stripes == num_stripes)
- return map;
-
- new = kmalloc(len, GFP_NOFS);
- if (!new) {
- /* just change map->num_stripes */
- map->num_stripes = num_stripes;
- return map;
- }
-
- memcpy(new, map, len);
- new->num_stripes = num_stripes;
- kfree(map);
- return new;
-}
+ ndevs = 0;
+ while (cur != &fs_devices->alloc_list) {
+ struct btrfs_device *device;
+ u64 max_avail;
+ u64 dev_offset;
-/*
- * helper to allocate device space from btrfs_device_info, in which we stored
- * max free space information of every device. It is used when we can not
- * allocate chunks by default size.
- *
- * By this helper, we can allocate a new chunk as larger as possible.
- */
-static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
- struct btrfs_fs_devices *fs_devices,
- struct btrfs_device_info *devices,
- int nr_device, u64 type,
- struct map_lookup **map_lookup,
- int min_stripes, u64 *stripe_size)
-{
- int i, index, sort_again = 0;
- int min_devices = min_stripes;
- u64 max_avail, min_free;
- struct map_lookup *map = *map_lookup;
- int ret;
+ device = list_entry(cur, struct btrfs_device, dev_alloc_list);
- if (nr_device < min_stripes)
- return -ENOSPC;
+ cur = cur->next;
- btrfs_descending_sort_devices(devices, nr_device);
+ if (!device->writeable) {
+ printk(KERN_ERR
+ "btrfs: read-only device in alloc_list\n");
+ WARN_ON(1);
+ continue;
+ }
- max_avail = devices[0].max_avail;
- if (!max_avail)
- return -ENOSPC;
+ if (!device->in_fs_metadata)
+ continue;
- for (i = 0; i < nr_device; i++) {
- /*
- * if dev_offset = 0, it means the free space of this device
- * is less than what we need, and we didn't search max avail
- * extent on this device, so do it now.
+ if (device->total_bytes > device->bytes_used)
+ total_avail = device->total_bytes - device->bytes_used;
+ else
+ total_avail = 0;
+ /* avail is off by max(alloc_start, 1MB), but that is the same
+ * for all devices, so it doesn't hurt the sorting later on
*/
- if (!devices[i].dev_offset) {
- ret = find_free_dev_extent(trans, devices[i].dev,
- max_avail,
- &devices[i].dev_offset,
- &devices[i].max_avail);
- if (ret != 0 && ret != -ENOSPC)
- return ret;
- sort_again = 1;
- }
- }
-
- /* we update the max avail free extent of each devices, sort again */
- if (sort_again)
- btrfs_descending_sort_devices(devices, nr_device);
- if (type & BTRFS_BLOCK_GROUP_DUP)
- min_devices = 1;
+ ret = find_free_dev_extent(trans, device,
+ max_stripe_size * dev_stripes,
+ &dev_offset, &max_avail);
+ if (ret && ret != -ENOSPC)
+ goto error;
- if (!devices[min_devices - 1].max_avail)
- return -ENOSPC;
+ if (ret == 0)
+ max_avail = max_stripe_size * dev_stripes;
- max_avail = devices[min_devices - 1].max_avail;
- if (type & BTRFS_BLOCK_GROUP_DUP)
- do_div(max_avail, 2);
+ if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
+ continue;
- max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
- min_stripes, 1);
- if (type & BTRFS_BLOCK_GROUP_DUP)
- min_free = max_avail * 2;
- else
- min_free = max_avail;
+ devices_info[ndevs].dev_offset = dev_offset;
+ devices_info[ndevs].max_avail = max_avail;
+ devices_info[ndevs].total_avail = total_avail;
+ devices_info[ndevs].dev = device;
+ ++ndevs;
+ }
- if (min_free > devices[min_devices - 1].max_avail)
- return -ENOSPC;
+ /*
+ * now sort the devices by hole size / available space
+ */
+ sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
+ btrfs_cmp_device_info, NULL);
- map = __shrink_map_lookup_stripes(map, min_stripes);
- *stripe_size = max_avail;
+ /* round down to number of usable stripes */
+ ndevs -= ndevs % devs_increment;
- index = 0;
- for (i = 0; i < min_stripes; i++) {
- map->stripes[i].dev = devices[index].dev;
- map->stripes[i].physical = devices[index].dev_offset;
- if (type & BTRFS_BLOCK_GROUP_DUP) {
- i++;
- map->stripes[i].dev = devices[index].dev;
- map->stripes[i].physical = devices[index].dev_offset +
- max_avail;
- }
- index++;
+ if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
+ ret = -ENOSPC;
+ goto error;
}
- *map_lookup = map;
- return 0;
-}
-
-static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct map_lookup **map_ret,
- u64 *num_bytes, u64 *stripe_size,
- u64 start, u64 type)
-{
- struct btrfs_fs_info *info = extent_root->fs_info;
- struct btrfs_device *device = NULL;
- struct btrfs_fs_devices *fs_devices = info->fs_devices;
- struct list_head *cur;
- struct map_lookup *map;
- struct extent_map_tree *em_tree;
- struct extent_map *em;
- struct btrfs_device_info *devices_info;
- struct list_head private_devs;
- u64 calc_size = 1024 * 1024 * 1024;
- u64 min_free;
- u64 avail;
- u64 dev_offset;
- int num_stripes;
- int min_stripes;
- int sub_stripes;
- int min_devices; /* the min number of devices we need */
- int i;
- int ret;
- int index;
+ if (devs_max && ndevs > devs_max)
+ ndevs = devs_max;
+ /*
+ * the primary goal is to maximize the number of stripes, so use as many
+ * devices as possible, even if the stripes are not maximum sized.
+ */
+ stripe_size = devices_info[ndevs-1].max_avail;
+ num_stripes = ndevs * dev_stripes;
- if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
- (type & BTRFS_BLOCK_GROUP_DUP)) {
- WARN_ON(1);
- type &= ~BTRFS_BLOCK_GROUP_DUP;
+ if (stripe_size * num_stripes > max_chunk_size * ncopies) {
+ stripe_size = max_chunk_size * ncopies;
+ do_div(stripe_size, num_stripes);
}
- if (list_empty(&fs_devices->alloc_list))
- return -ENOSPC;
-
- ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
- &min_stripes, &sub_stripes);
- if (ret)
- return ret;
- devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
- GFP_NOFS);
- if (!devices_info)
- return -ENOMEM;
+ do_div(stripe_size, dev_stripes);
+ do_div(stripe_size, BTRFS_STRIPE_LEN);
+ stripe_size *= BTRFS_STRIPE_LEN;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
@@ -2514,85 +2475,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
}
map->num_stripes = num_stripes;
- cur = fs_devices->alloc_list.next;
- index = 0;
- i = 0;
-
- calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
- num_stripes, 0);
-
- if (type & BTRFS_BLOCK_GROUP_DUP) {
- min_free = calc_size * 2;
- min_devices = 1;
- } else {
- min_free = calc_size;
- min_devices = min_stripes;
- }
-
- INIT_LIST_HEAD(&private_devs);
- while (index < num_stripes) {
- device = list_entry(cur, struct btrfs_device, dev_alloc_list);
- BUG_ON(!device->writeable);
- if (device->total_bytes > device->bytes_used)
- avail = device->total_bytes - device->bytes_used;
- else
- avail = 0;
- cur = cur->next;
-
- if (device->in_fs_metadata && avail >= min_free) {
- ret = find_free_dev_extent(trans, device, min_free,
- &devices_info[i].dev_offset,
- &devices_info[i].max_avail);
- if (ret == 0) {
- list_move_tail(&device->dev_alloc_list,
- &private_devs);
- map->stripes[index].dev = device;
- map->stripes[index].physical =
- devices_info[i].dev_offset;
- index++;
- if (type & BTRFS_BLOCK_GROUP_DUP) {
- map->stripes[index].dev = device;
- map->stripes[index].physical =
- devices_info[i].dev_offset +
- calc_size;
- index++;
- }
- } else if (ret != -ENOSPC)
- goto error;
-
- devices_info[i].dev = device;
- i++;
- } else if (device->in_fs_metadata &&
- avail >= BTRFS_STRIPE_LEN) {
- devices_info[i].dev = device;
- devices_info[i].max_avail = avail;
- i++;
- }
-
- if (cur == &fs_devices->alloc_list)
- break;
- }
-
- list_splice(&private_devs, &fs_devices->alloc_list);
- if (index < num_stripes) {
- if (index >= min_stripes) {
- num_stripes = index;
- if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
- num_stripes /= sub_stripes;
- num_stripes *= sub_stripes;
- }
-
- map = __shrink_map_lookup_stripes(map, num_stripes);
- } else if (i >= min_devices) {
- ret = __btrfs_alloc_tiny_space(trans, fs_devices,
- devices_info, i, type,
- &map, min_stripes,
- &calc_size);
- if (ret)
- goto error;
- } else {
- ret = -ENOSPC;
- goto error;
+ for (i = 0; i < ndevs; ++i) {
+ for (j = 0; j < dev_stripes; ++j) {
+ int s = i * dev_stripes + j;
+ map->stripes[s].dev = devices_info[i].dev;
+ map->stripes[s].physical = devices_info[i].dev_offset +
+ j * stripe_size;
}
}
map->sector_size = extent_root->sectorsize;
@@ -2603,20 +2491,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
map->sub_stripes = sub_stripes;
*map_ret = map;
- *stripe_size = calc_size;
- *num_bytes = chunk_bytes_by_type(type, calc_size,
- map->num_stripes, sub_stripes);
+ num_bytes = stripe_size * (num_stripes / ncopies);
- trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
+ *stripe_size_out = stripe_size;
+ *num_bytes_out = num_bytes;
- em = alloc_extent_map(GFP_NOFS);
+ trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
+
+ em = alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto error;
}
em->bdev = (struct block_device *)map;
em->start = start;
- em->len = *num_bytes;
+ em->len = num_bytes;
em->block_start = 0;
em->block_len = em->len;
@@ -2629,20 +2518,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
ret = btrfs_make_block_group(trans, extent_root, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
- start, *num_bytes);
+ start, num_bytes);
BUG_ON(ret);
- index = 0;
- while (index < map->num_stripes) {
- device = map->stripes[index].dev;
- dev_offset = map->stripes[index].physical;
+ for (i = 0; i < map->num_stripes; ++i) {
+ struct btrfs_device *device;
+ u64 dev_offset;
+
+ device = map->stripes[i].dev;
+ dev_offset = map->stripes[i].physical;
ret = btrfs_alloc_dev_extent(trans, device,
info->chunk_root->root_key.objectid,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
- start, dev_offset, calc_size);
+ start, dev_offset, stripe_size);
BUG_ON(ret);
- index++;
}
kfree(devices_info);
@@ -2849,7 +2739,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
- extent_map_tree_init(&tree->map_tree, GFP_NOFS);
+ extent_map_tree_init(&tree->map_tree);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
@@ -3499,7 +3389,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
free_extent_map(em);
}
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em)
return -ENOMEM;
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3688,15 +3578,6 @@ static int read_one_dev(struct btrfs_root *root,
return ret;
}
-int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
-{
- struct btrfs_dev_item *dev_item;
-
- dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
- dev_item);
- return read_one_dev(root, buf, dev_item);
-}
-
int btrfs_read_sys_array(struct btrfs_root *root)
{
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
@@ -3813,7 +3694,7 @@ again:
}
if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
key.objectid = 0;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
ret = 0;
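The rewritten allocator above replaces the old per-RAID-level stripe sizing with a single scheme: gather max_avail and total_avail for every writable device, sort the candidates in descending order, and size the stripes from the smallest hole among the devices actually used. The following is a rough userspace sketch of that sort-and-size step; struct dev_info and the sample numbers are hypothetical, and only the comparator mirrors the ordering used by btrfs_cmp_device_info().

/*
 * Illustrative sketch (plain userspace C, hypothetical names): sort devices
 * by largest free extent, then by total free space, both descending, and
 * take the smallest selected hole as the per-device stripe size.
 */
#include <stdio.h>
#include <stdlib.h>

struct dev_info {
	unsigned long long max_avail;   /* largest free extent on the device */
	unsigned long long total_avail; /* total unallocated bytes */
};

static int cmp_dev_info(const void *a, const void *b)
{
	const struct dev_info *da = a, *db = b;

	if (da->max_avail > db->max_avail)
		return -1;
	if (da->max_avail < db->max_avail)
		return 1;
	if (da->total_avail > db->total_avail)
		return -1;
	if (da->total_avail < db->total_avail)
		return 1;
	return 0;
}

int main(void)
{
	struct dev_info devs[] = {
		{ .max_avail = 4096,  .total_avail = 8192 },
		{ .max_avail = 16384, .total_avail = 16384 },
		{ .max_avail = 4096,  .total_avail = 65536 },
	};
	int ndevs = 3;

	qsort(devs, ndevs, sizeof(devs[0]), cmp_dev_info);

	/* every selected device can hold at least this much per stripe */
	printf("stripe_size = %llu\n", devs[ndevs - 1].max_avail);
	return 0;
}

With the candidates sorted this way, devices_info[ndevs - 1].max_avail is the binding constraint, which is what the new code takes as stripe_size before clamping it against max_chunk_size.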
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index cc2eadaf7a27..7c12d61ae7ae 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -85,7 +85,12 @@ struct btrfs_device {
/* physical drive uuid (or lvm uuid) */
u8 uuid[BTRFS_UUID_SIZE];
+ /* per-device scrub information */
+ struct scrub_dev *scrub_device;
+
struct btrfs_work work;
+ struct rcu_head rcu;
+ struct work_struct rcu_work;
};
struct btrfs_fs_devices {
@@ -144,6 +149,7 @@ struct btrfs_device_info {
struct btrfs_device *dev;
u64 dev_offset;
u64 max_avail;
+ u64 total_avail;
};
struct map_lookup {
@@ -157,20 +163,8 @@ struct map_lookup {
struct btrfs_bio_stripe stripes[];
};
-/* Used to sort the devices by max_avail(descending sort) */
-int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2);
-
-/*
- * sort the devices by max_avail, in which max free extent size of each device
- * is stored.(Descending Sort)
- */
-static inline void btrfs_descending_sort_devices(
- struct btrfs_device_info *devices,
- size_t nr_devices)
-{
- sort(devices, nr_devices, sizeof(struct btrfs_device_info),
- btrfs_cmp_device_free_bytes, NULL);
-}
+#define map_lookup_size(n) (sizeof(struct map_lookup) + \
+ (sizeof(struct btrfs_bio_stripe) * (n)))
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
u64 end, u64 *length);
@@ -196,7 +190,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
int mirror_num, int async_submit);
-int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
@@ -209,8 +202,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
int btrfs_rm_device(struct btrfs_root *root, char *device_path);
int btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
- u64 logical, struct page *page);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
@@ -218,8 +209,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_root *root, char *path);
int btrfs_balance(struct btrfs_root *dev_root);
-void btrfs_unlock_volumes(void);
-void btrfs_lock_volumes(void);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index cfd660550ded..f3107e4b4d56 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
return -ENOMEM;
/* lookup the xattr by name */
- di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
+ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name,
strlen(name), 0);
if (!di) {
ret = -ENODATA;
@@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
return -ENOMEM;
/* first lets see if we already have this xattr */
- di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
+ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
strlen(name), -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
@@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
ret = btrfs_delete_one_dir_name(trans, root, path, di);
BUG_ON(ret);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* if we don't have a value then we are removing the xattr */
if (!value)
goto out;
} else {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (flags & XATTR_REPLACE) {
/* we couldn't find the attr to replace */
@@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
}
/* ok we have to create a completely new xattr */
- ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
+ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
name, name_len, value, size);
BUG_ON(ret);
out:
@@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
* NOTE: we set key.offset = 0; because we want to start with the
* first xattr that we find and walk forward
*/
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
key.offset = 0;
diff --git a/fs/buffer.c b/fs/buffer.c
index 698c6b2cc462..49c9aada0374 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2382,6 +2382,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
ret = -EAGAIN;
goto out_unlock;
}
+ wait_on_page_writeback(page);
return 0;
out_unlock:
unlock_page(page);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 75c47cd8d086..1cd4c3a1862d 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -153,26 +153,6 @@ config CIFS_ACL
Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
is handed over to the application/caller.
-config CIFS_SMB2
- bool "SMB2 network file system support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && INET && BROKEN
- select NLS
- select KEYS
- select FSCACHE
- select DNS_RESOLVER
-
- help
- This enables experimental support for the SMB2 (Server Message Block
- version 2) protocol. The SMB2 protocol is the successor to the
- popular CIFS and SMB network file sharing protocols. SMB2 is the
- native file sharing mechanism for recent versions of Windows
- operating systems (since Vista). SMB2 enablement will eventually
- allow users better performance, security and features, than would be
- possible with cifs. Note that smb2 mount options also are simpler
- (compared to cifs) due to protocol improvements.
-
- Unless you are a developer or tester, say N.
-
config CIFS_NFSD_EXPORT
bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
diff --git a/fs/cifs/README b/fs/cifs/README
index 4a3ca0e5ca24..c5c2c5e5f0f2 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -457,6 +457,9 @@ A partial list of the supported mount options follows:
otherwise - read from the server. All written data are stored
in the cache, but if the client doesn't have Exclusive Oplock,
it writes the data to the server.
+ rwpidforward Forward pid of a process who opened a file to any read or write
+ operation on that file. This prevent applications like WINE
+ from failing on read and write if we use mandatory brlock style.
acl Allow setfacl and getfacl to manage posix ACLs if server
supports them. (default)
noacl Do not allow setfacl and getfacl calls on this mount
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 53d57a3fe427..dd8584d35a14 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -146,7 +146,7 @@ static char *extract_sharename(const char *treename)
static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
uint16_t maxbuf)
{
- const struct cifsTconInfo *tcon = cookie_netfs_data;
+ const struct cifs_tcon *tcon = cookie_netfs_data;
char *sharename;
uint16_t len;
@@ -173,7 +173,7 @@ cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer,
uint16_t maxbuf)
{
struct cifs_fscache_super_auxdata auxdata;
- const struct cifsTconInfo *tcon = cookie_netfs_data;
+ const struct cifs_tcon *tcon = cookie_netfs_data;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.resource_id = tcon->resource_id;
@@ -192,7 +192,7 @@ fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
uint16_t datalen)
{
struct cifs_fscache_super_auxdata auxdata;
- const struct cifsTconInfo *tcon = cookie_netfs_data;
+ const struct cifs_tcon *tcon = cookie_netfs_data;
if (datalen != sizeof(auxdata))
return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 18f4272d9047..2fe3cf13b2e9 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -110,8 +110,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct list_head *tmp1, *tmp2, *tmp3;
struct mid_q_entry *mid_entry;
struct TCP_Server_Info *server;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
int i, j;
__u32 dev_type;
@@ -152,7 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
tcp_ses_list);
i++;
list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifsSesInfo,
+ ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) ||
@@ -171,7 +171,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "TCP status: %d\n\tLocal Users To "
"Server: %d SecMode: 0x%x Req On Wire: %d",
server->tcpStatus, server->srv_count,
- server->secMode,
+ server->sec_mode,
atomic_read(&server->inFlight));
#ifdef CONFIG_CIFS_STATS2
@@ -183,7 +183,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_puts(m, "\n\tShares:");
j = 0;
list_for_each(tmp3, &ses->tcon_list) {
- tcon = list_entry(tmp3, struct cifsTconInfo,
+ tcon = list_entry(tmp3, struct cifs_tcon,
tcon_list);
++j;
dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
@@ -256,8 +256,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
int rc;
struct list_head *tmp1, *tmp2, *tmp3;
struct TCP_Server_Info *server;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
rc = get_user(c, buffer);
if (rc)
@@ -273,11 +273,11 @@ static ssize_t cifs_stats_proc_write(struct file *file,
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifsSesInfo,
+ ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
list_for_each(tmp3, &ses->tcon_list) {
tcon = list_entry(tmp3,
- struct cifsTconInfo,
+ struct cifs_tcon,
tcon_list);
atomic_set(&tcon->num_smbs_sent, 0);
atomic_set(&tcon->num_writes, 0);
@@ -312,8 +312,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
int i;
struct list_head *tmp1, *tmp2, *tmp3;
struct TCP_Server_Info *server;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
seq_printf(m,
"Resources in use\nCIFS Session: %d\n",
@@ -346,11 +346,11 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifsSesInfo,
+ ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
list_for_each(tmp3, &ses->tcon_list) {
tcon = list_entry(tmp3,
- struct cifsTconInfo,
+ struct cifs_tcon,
tcon_list);
i++;
seq_printf(m, "\n%d) %s", i, tcon->treeName);
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 2b68ac57d97d..8d8f28c94c0f 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -272,7 +272,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
struct dfs_info3_param *referrals = NULL;
unsigned int num_referrals = 0;
struct cifs_sb_info *cifs_sb;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
char *full_path;
int xid, i;
int rc;
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index a9d5692e0c20..ffb1459dc6ec 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -41,6 +41,7 @@
#define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */
#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
+#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
struct cifs_sb_info {
struct rb_root tlink_tree;
@@ -56,8 +57,6 @@ struct cifs_sb_info {
mode_t mnt_file_mode;
mode_t mnt_dir_mode;
unsigned int mnt_cifs_flags;
- int prepathlen;
- char *prepath; /* relative path under the share to mount to */
char *mountdata; /* options received at mount time or via DFS refs */
struct backing_dev_info bdi;
struct delayed_work prune_tlinks;
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 33d221394aca..2272fd5fe5b7 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -95,7 +95,7 @@ struct key_type cifs_spnego_key_type = {
/* get a key struct with a SPNEGO security blob, suitable for session setup */
struct key *
-cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
+cifs_get_spnego_key(struct cifs_ses *sesInfo)
{
struct TCP_Server_Info *server = sesInfo->server;
struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index e4041ec4d712..31bef9ee078b 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -41,7 +41,7 @@ struct cifs_spnego_msg {
#ifdef __KERNEL__
extern struct key_type cifs_spnego_key_type;
-extern struct key *cifs_get_spnego_key(struct cifsSesInfo *sesInfo);
+extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo);
#endif /* KERNEL */
#endif /* _CIFS_SPNEGO_H */
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index f3c6fb9942ac..8f1700623b41 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -38,7 +38,7 @@ static const struct cifs_sid sid_everyone = {
1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
- 1, 1, {0, 0, 0, 0, 0, 5}, {11} };
+ 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* group users */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
@@ -458,7 +458,8 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
if (num_subauth) {
for (i = 0; i < num_subauth; ++i) {
if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
- if (ctsid->sub_auth[i] > cwsid->sub_auth[i])
+ if (le32_to_cpu(ctsid->sub_auth[i]) >
+ le32_to_cpu(cwsid->sub_auth[i]))
return 1;
else
return -1;
@@ -945,7 +946,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
int oplock = 0;
int xid, rc;
__u16 fid;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -1013,7 +1014,7 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
int oplock = 0;
int xid, rc;
__u16 fid;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45c3f78c8f81..dfbd9f1f373d 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -229,7 +229,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
}
/* first calculate 24 bytes ntlm response and then 16 byte session key */
-int setup_ntlm_response(struct cifsSesInfo *ses)
+int setup_ntlm_response(struct cifs_ses *ses)
{
int rc = 0;
unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
@@ -312,7 +312,7 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
* Allocate domain name which gets freed when session struct is deallocated.
*/
static int
-build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
unsigned int dlen;
unsigned int wlen;
@@ -400,7 +400,7 @@ build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
* about target string i.e. for some, just user name might suffice.
*/
static int
-find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
unsigned int attrsize;
unsigned int type;
@@ -445,7 +445,7 @@ find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
return 0;
}
-static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
+static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
const struct nls_table *nls_cp)
{
int rc = 0;
@@ -527,7 +527,7 @@ calc_exit_2:
}
static int
-CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
+CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
{
int rc;
unsigned int offset = CIFS_SESS_KEY_SIZE + 8;
@@ -563,7 +563,7 @@ CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
int
-setup_ntlmv2_rsp(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
int rc;
int baselen;
@@ -649,7 +649,7 @@ setup_ntlmv2_rsp_ret:
}
int
-calc_seckey(struct cifsSesInfo *ses)
+calc_seckey(struct cifs_ses *ses)
{
int rc;
struct crypto_blkcipher *tfm_arc4;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 493b74ca5648..989442dcfb45 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -104,46 +104,25 @@ cifs_sb_deactive(struct super_block *sb)
}
static int
-cifs_read_super(struct super_block *sb, void *data,
+cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
const char *devname, int silent)
{
struct inode *inode;
struct cifs_sb_info *cifs_sb;
int rc = 0;
- /* BB should we make this contingent on mount parm? */
- sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
- sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
cifs_sb = CIFS_SB(sb);
- if (cifs_sb == NULL)
- return -ENOMEM;
spin_lock_init(&cifs_sb->tlink_tree_lock);
cifs_sb->tlink_tree = RB_ROOT;
rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
- if (rc) {
- kfree(cifs_sb);
+ if (rc)
return rc;
- }
- cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
- /*
- * Copy mount params to sb for use in submounts. Better to do
- * the copy here and deal with the error before cleanup gets
- * complicated post-mount.
- */
- if (data) {
- cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
- if (cifs_sb->mountdata == NULL) {
- bdi_destroy(&cifs_sb->bdi);
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
- return -ENOMEM;
- }
- }
+ cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
- rc = cifs_mount(sb, cifs_sb, devname);
+ rc = cifs_mount(sb, cifs_sb, volume_info, devname);
if (rc) {
if (!silent)
@@ -194,15 +173,7 @@ out_no_root:
cifs_umount(sb, cifs_sb);
out_mount_failed:
- if (cifs_sb) {
- if (cifs_sb->mountdata) {
- kfree(cifs_sb->mountdata);
- cifs_sb->mountdata = NULL;
- }
- unload_nls(cifs_sb->local_nls);
- bdi_destroy(&cifs_sb->bdi);
- kfree(cifs_sb);
- }
+ bdi_destroy(&cifs_sb->bdi);
return rc;
}
@@ -237,7 +208,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
int rc = -EOPNOTSUPP;
int xid;
@@ -390,7 +361,7 @@ static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct sockaddr *srcaddr;
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
@@ -444,14 +415,20 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
seq_printf(s, ",nocase");
if (tcon->retry)
seq_printf(s, ",hard");
- if (cifs_sb->prepath)
- seq_printf(s, ",prepath=%s", cifs_sb->prepath);
+ if (tcon->unix_ext)
+ seq_printf(s, ",unix");
+ else
+ seq_printf(s, ",nounix");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_printf(s, ",posixpaths");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
seq_printf(s, ",setuids");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
seq_printf(s, ",serverino");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ seq_printf(s, ",rwpidforward");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
+ seq_printf(s, ",forcemand");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
seq_printf(s, ",directio");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
@@ -484,7 +461,7 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
static void cifs_umount_begin(struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
if (cifs_sb == NULL)
return;
@@ -559,29 +536,189 @@ static const struct super_operations cifs_super_ops = {
#endif
};
+/*
+ * Get the root dentry from the superblock according to the prefix path mount
+ * option. Return a dentry with an extra reference on success, NULL otherwise.
+ */
+static struct dentry *
+cifs_get_root(struct smb_vol *vol, struct super_block *sb)
+{
+ int xid, rc;
+ struct inode *inode;
+ struct qstr name;
+ struct dentry *dparent = NULL, *dchild = NULL, *alias;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ unsigned int i, full_len, len;
+ char *full_path = NULL, *pstart;
+ char sep;
+
+ full_path = cifs_build_path_to_root(vol, cifs_sb,
+ cifs_sb_master_tcon(cifs_sb));
+ if (full_path == NULL)
+ return NULL;
+
+ cFYI(1, "Get root dentry for %s", full_path);
+
+ xid = GetXid();
+ sep = CIFS_DIR_SEP(cifs_sb);
+ dparent = dget(sb->s_root);
+ full_len = strlen(full_path);
+ full_path[full_len] = sep;
+ pstart = full_path + 1;
+
+ for (i = 1, len = 0; i <= full_len; i++) {
+ if (full_path[i] != sep || !len) {
+ len++;
+ continue;
+ }
+
+ full_path[i] = 0;
+ cFYI(1, "get dentry for %s", pstart);
+
+ name.name = pstart;
+ name.len = len;
+ name.hash = full_name_hash(pstart, len);
+ dchild = d_lookup(dparent, &name);
+ if (dchild == NULL) {
+ cFYI(1, "not exists");
+ dchild = d_alloc(dparent, &name);
+ if (dchild == NULL) {
+ dput(dparent);
+ dparent = NULL;
+ goto out;
+ }
+ }
+
+ cFYI(1, "get inode");
+ if (dchild->d_inode == NULL) {
+ cFYI(1, "not exists");
+ inode = NULL;
+ if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
+ rc = cifs_get_inode_info_unix(&inode, full_path,
+ sb, xid);
+ else
+ rc = cifs_get_inode_info(&inode, full_path,
+ NULL, sb, xid, NULL);
+ if (rc) {
+ dput(dchild);
+ dput(dparent);
+ dparent = NULL;
+ goto out;
+ }
+ alias = d_materialise_unique(dchild, inode);
+ if (alias != NULL) {
+ dput(dchild);
+ if (IS_ERR(alias)) {
+ dput(dparent);
+ dparent = NULL;
+ goto out;
+ }
+ dchild = alias;
+ }
+ }
+ cFYI(1, "parent %p, child %p", dparent, dchild);
+
+ dput(dparent);
+ dparent = dchild;
+ len = 0;
+ pstart = full_path + i + 1;
+ full_path[i] = sep;
+ }
+out:
+ _FreeXid(xid);
+ kfree(full_path);
+ return dparent;
+}
+
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+ int flags, const char *dev_name, void *data)
{
int rc;
struct super_block *sb;
-
- sb = sget(fs_type, NULL, set_anon_super, NULL);
+ struct cifs_sb_info *cifs_sb;
+ struct smb_vol *volume_info;
+ struct cifs_mnt_data mnt_data;
+ struct dentry *root;
cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
- if (IS_ERR(sb))
- return ERR_CAST(sb);
+ rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name);
+ if (rc)
+ return ERR_PTR(rc);
+
+ cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
+ if (cifs_sb == NULL) {
+ root = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ cifs_setup_cifs_sb(volume_info, cifs_sb);
+
+ mnt_data.vol = volume_info;
+ mnt_data.cifs_sb = cifs_sb;
+ mnt_data.flags = flags;
+
+ sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
+ if (IS_ERR(sb)) {
+ root = ERR_CAST(sb);
+ goto out_cifs_sb;
+ }
+
+ if (sb->s_fs_info) {
+ cFYI(1, "Use existing superblock");
+ goto out_shared;
+ }
+
+ /*
+ * Copy mount params for use in submounts. Better to do
+ * the copy here and deal with the error before cleanup gets
+ * complicated post-mount.
+ */
+ cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
+ if (cifs_sb->mountdata == NULL) {
+ root = ERR_PTR(-ENOMEM);
+ goto out_super;
+ }
sb->s_flags = flags;
+ /* BB should we make this contingent on mount parm? */
+ sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
+ sb->s_fs_info = cifs_sb;
- rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
+ rc = cifs_read_super(sb, volume_info, dev_name,
+ flags & MS_SILENT ? 1 : 0);
if (rc) {
- deactivate_locked_super(sb);
- return ERR_PTR(rc);
+ root = ERR_PTR(rc);
+ goto out_super;
}
+
sb->s_flags |= MS_ACTIVE;
- return dget(sb->s_root);
+
+ root = cifs_get_root(volume_info, sb);
+ if (root == NULL)
+ goto out_super;
+
+ cFYI(1, "dentry root is: %p", root);
+ goto out;
+
+out_shared:
+ root = cifs_get_root(volume_info, sb);
+ if (root)
+ cFYI(1, "dentry root is: %p", root);
+ goto out;
+
+out_super:
+ kfree(cifs_sb->mountdata);
+ deactivate_locked_super(sb);
+
+out_cifs_sb:
+ unload_nls(cifs_sb->local_nls);
+ kfree(cifs_sb);
+
+out:
+ cifs_cleanup_volume_info(&volume_info);
+ return root;
}
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 76b4517e74b0..6255fa812c7a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -155,6 +155,81 @@ struct cifs_cred {
*****************************************************************
*/
+struct smb_vol {
+ char *username;
+ char *password;
+ char *domainname;
+ char *UNC;
+ char *UNCip;
+ char *iocharset; /* local code page for mapping to and from Unicode */
+ char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
+ char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
+ uid_t cred_uid;
+ uid_t linux_uid;
+ gid_t linux_gid;
+ mode_t file_mode;
+ mode_t dir_mode;
+ unsigned secFlg;
+ bool retry:1;
+ bool intr:1;
+ bool setuids:1;
+ bool override_uid:1;
+ bool override_gid:1;
+ bool dynperm:1;
+ bool noperm:1;
+ bool no_psx_acl:1; /* set if posix acl support should be disabled */
+ bool cifs_acl:1;
+ bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
+ bool server_ino:1; /* use inode numbers from server ie UniqueId */
+ bool direct_io:1;
+ bool strict_io:1; /* strict cache behavior */
+ bool remap:1; /* set to remap seven reserved chars in filenames */
+ bool posix_paths:1; /* unset to not ask for posix pathnames. */
+ bool no_linux_ext:1;
+ bool sfu_emul:1;
+ bool nullauth:1; /* attempt to authenticate with null user */
+ bool nocase:1; /* request case insensitive filenames */
+ bool nobrl:1; /* disable sending byte range locks to srv */
+ bool mand_lock:1; /* send mandatory not posix byte range lock reqs */
+ bool seal:1; /* request transport encryption on share */
+ bool nodfs:1; /* Do not request DFS, even if available */
+ bool local_lease:1; /* check leases only on local system, not remote */
+ bool noblocksnd:1;
+ bool noautotune:1;
+ bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
+ bool fsc:1; /* enable fscache */
+ bool mfsymlinks:1; /* use Minshall+French Symlinks */
+ bool multiuser:1;
+ bool rwpidforward:1; /* pid forward for read/write operations */
+ unsigned int rsize;
+ unsigned int wsize;
+ bool sockopt_tcp_nodelay:1;
+ unsigned short int port;
+ unsigned long actimeo; /* attribute cache timeout (jiffies) */
+ char *prepath;
+ struct sockaddr_storage srcaddr; /* allow binding to a local IP */
+ struct nls_table *local_nls;
+};
+
+#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
+ CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \
+ CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \
+ CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \
+ CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \
+ CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
+ CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
+ CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
+ CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO)
+
+#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
+ MS_NODEV | MS_SYNCHRONOUS)
+
+struct cifs_mnt_data {
+ struct cifs_sb_info *cifs_sb;
+ struct smb_vol *vol;
+ int flags;
+};
+
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
@@ -179,7 +254,7 @@ struct TCP_Server_Info {
struct mutex srv_mutex;
struct task_struct *tsk;
char server_GUID[16];
- char secMode;
+ char sec_mode;
bool session_estab; /* mark when very first sess is established */
u16 dialect; /* dialect index that server chose */
enum securityEnum secType;
@@ -254,7 +329,7 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
/*
* Session structure. One of these for each uid session with a particular host
*/
-struct cifsSesInfo {
+struct cifs_ses {
struct list_head smb_ses_list;
struct list_head tcon_list;
struct mutex session_mutex;
@@ -294,11 +369,11 @@ struct cifsSesInfo {
* there is one of these for each connection to a resource on a particular
* session
*/
-struct cifsTconInfo {
+struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head openFileList;
- struct cifsSesInfo *ses; /* pointer to session associated with */
+ struct cifs_ses *ses; /* pointer to session associated with */
char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
char *password; /* for share-level security */
@@ -380,12 +455,12 @@ struct tcon_link {
#define TCON_LINK_IN_TREE 2
unsigned long tl_time;
atomic_t tl_count;
- struct cifsTconInfo *tl_tcon;
+ struct cifs_tcon *tl_tcon;
};
extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
-static inline struct cifsTconInfo *
+static inline struct cifs_tcon *
tlink_tcon(struct tcon_link *tlink)
{
return tlink->tl_tcon;
@@ -402,7 +477,7 @@ cifs_get_tlink(struct tcon_link *tlink)
}
/* This function is always expected to succeed */
-extern struct cifsTconInfo *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
+extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
/*
* This info hangs off the cifsFileInfo structure, pointed to by llist.
@@ -455,6 +530,14 @@ struct cifsFileInfo {
struct work_struct oplock_break; /* work for oplock breaks */
};
+struct cifs_io_parms {
+ __u16 netfid;
+ __u32 pid;
+ __u64 offset;
+ unsigned int length;
+ struct cifs_tcon *tcon;
+};
+
/*
* Take a reference on the file private data. Must be called with
* cifs_file_list_lock held.
@@ -509,10 +592,30 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
return '\\';
}
+static inline void
+convert_delimiter(char *path, char delim)
+{
+ int i;
+ char old_delim;
+
+ if (path == NULL)
+ return;
+
+ if (delim == '/')
+ old_delim = '\\';
+ else
+ old_delim = '/';
+
+ for (i = 0; path[i] != '\0'; i++) {
+ if (path[i] == old_delim)
+ path[i] = delim;
+ }
+}
+
#ifdef CONFIG_CIFS_STATS
#define cifs_stats_inc atomic_inc
-static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
+static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
unsigned int bytes)
{
if (bytes) {
@@ -522,7 +625,7 @@ static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
}
}
-static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon,
+static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
unsigned int bytes)
{
spin_lock(&tcon->stat_lock);
@@ -543,9 +646,8 @@ struct mid_q_entry;
* This is the prototype for the mid callback function. When creating one,
* take special care to avoid deadlocks. Things to bear in mind:
*
- * - it will be called by cifsd
- * - the GlobalMid_Lock will be held
- * - the mid will be removed from the pending_mid_q list
+ * - it will be called by cifsd, with no locks held
+ * - the mid will be removed from any lists
*/
typedef void (mid_callback_t)(struct mid_q_entry *mid);
@@ -573,7 +675,7 @@ struct mid_q_entry {
struct oplock_q_entry {
struct list_head qhead;
struct inode *pinode;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
__u16 netfid;
};
@@ -656,6 +758,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define MID_RESPONSE_RECEIVED 4
#define MID_RETRY_NEEDED 8 /* session closed while this request out */
#define MID_RESPONSE_MALFORMED 0x10
+#define MID_SHUTDOWN 0x20
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 6e69e06a30b3..953f84413c77 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -57,8 +57,9 @@ extern int init_cifs_idmap(void);
extern void exit_cifs_idmap(void);
extern void cifs_destroy_idmaptrees(void);
extern char *build_path_from_dentry(struct dentry *);
-extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
- struct cifsTconInfo *tcon);
+extern char *cifs_build_path_to_root(struct smb_vol *vol,
+ struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
extern char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath, const struct dfs_info3_param *ref,
@@ -67,20 +68,22 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
struct TCP_Server_Info *server);
extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
-extern int cifs_call_async(struct TCP_Server_Info *server,
- struct smb_hdr *in_buf, mid_callback_t *callback,
- void *cbdata);
-extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
+extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
+ unsigned int nvec, mid_callback_t *callback,
+ void *cbdata, bool ignore_pend);
+extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
int * /* bytes returned */ , const int long_op);
-extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
+extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, int flags);
-extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
+extern int cifs_check_receive(struct mid_q_entry *mid,
+ struct TCP_Server_Info *server, bool log_error);
+extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */ , const int flags);
extern int SendReceiveBlockingLock(const unsigned int xid,
- struct cifsTconInfo *ptcon,
+ struct cifs_tcon *ptcon,
struct smb_hdr *in_buf ,
struct smb_hdr *out_buf,
int *bytes_returned);
@@ -99,14 +102,14 @@ extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
const unsigned short int port);
-extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
+extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
- const struct cifsTconInfo *, int /* length of
+ const struct cifs_tcon *, int /* length of
fixed section (word count) in two byte units */);
extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
- struct cifsSesInfo *ses,
+ struct cifs_ses *ses,
void **request_buf);
-extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp);
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
@@ -148,102 +151,108 @@ extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
const char *);
+extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ struct cifs_sb_info *cifs_sb);
+extern int cifs_match_super(struct super_block *, void *);
+extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
+extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
+ char *mount_data, const char *devname);
extern int cifs_mount(struct super_block *, struct cifs_sb_info *,
- const char *);
+ struct smb_vol *, const char *);
extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
extern void cifs_dfs_release_automount_timer(void);
void cifs_proc_init(void);
void cifs_proc_clean(void);
extern int cifs_negotiate_protocol(unsigned int xid,
- struct cifsSesInfo *ses);
-extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+ struct cifs_ses *ses);
+extern int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info);
-extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
+extern int CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses);
-extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
- const char *tree, struct cifsTconInfo *tcon,
+extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+ const char *tree, struct cifs_tcon *tcon,
const struct nls_table *);
-extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName, const struct nls_table *nls_codepage,
__u16 *searchHandle, struct cifs_search_info *psrch_inf,
int map, const char dirsep);
-extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
__u16 searchHandle, struct cifs_search_info *psrch_inf);
-extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
+extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
const __u16 search_handle);
-extern int CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData);
-extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *findData,
int legacy /* whether to use old info level */,
const struct nls_table *nls_codepage, int remap);
-extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
+extern int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *findData,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
extern int CIFSSMBUnixQPathInfo(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+extern int CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
const unsigned char *searchName,
struct dfs_info3_param **target_nodes,
unsigned int *number_of_nodes_in_array,
const struct nls_table *nls_codepage, int remap);
-extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
const char *old_path,
const struct nls_table *nls_codepage,
unsigned int *pnum_referrals,
struct dfs_info3_param **preferrals,
int remap);
-extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
struct super_block *sb, struct smb_vol *vol);
-extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon,
+extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon,
__u64 cap);
extern int CIFSSMBQFSAttributeInfo(const int xid,
- struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon);
+extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid,
__u32 pid_of_opener);
-extern int CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
bool delete_file, __u16 fid, __u32 pid_of_opener);
#if 0
-extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon,
char *fileName, __u16 dos_attributes,
const struct nls_table *nls_codepage);
#endif /* possibly unneeded function */
-extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon,
const char *fileName, __u64 size,
bool setAllocationSizeFlag,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon,
__u64 size, __u16 fileHandle, __u32 opener_pid,
bool AllocSizeFlag);
@@ -257,120 +266,116 @@ struct cifs_unix_set_info_args {
dev_t device;
};
-extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
const struct cifs_unix_set_info_args *args,
u16 fid, u32 pid_of_opener);
-extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon,
+extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *pTcon,
char *fileName,
const struct cifs_unix_set_info_args *args,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
const char *newName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon,
const char *name, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon,
const char *name, __u16 type,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon,
const char *name,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+extern int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
int netfid, const char *target_name,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSCreateHardLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSUnixCreateHardLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSUnixCreateSymLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage);
extern int CIFSSMBUnixQuerySymLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const unsigned char *searchName, char **syminfo,
const struct nls_table *nls_codepage);
#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
extern int CIFSSMBQueryReparseLinkInfo(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage);
#endif /* temporarily unused until cifs_symlink fixed */
-extern int CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int disposition,
const int access_flags, const int omode,
__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
const struct nls_table *nls_codepage, int remap);
-extern int SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
+extern int SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int disposition,
const int access_flags, const int omode,
__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon,
u32 posix_flags, __u64 mode, __u16 *netfid,
FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBClose(const int xid, struct cifs_tcon *tcon,
const int smb_file_id);
-extern int CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBFlush(const int xid, struct cifs_tcon *tcon,
const int smb_file_id);
-extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
- const int netfid, unsigned int count,
- const __u64 lseek, unsigned int *nbytes, char **buf,
+extern int CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms,
+ unsigned int *nbytes, char **buf,
int *return_buf_type);
-extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
- const int netfid, const unsigned int count,
- const __u64 lseek, unsigned int *nbytes,
- const char *buf, const char __user *ubuf,
+extern int CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
+ unsigned int *nbytes, const char *buf,
+ const char __user *ubuf, const int long_op);
+extern int CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
+ unsigned int *nbytes, struct kvec *iov, const int nvec,
const int long_op);
-extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
- const int netfid, const unsigned int count,
- const __u64 offset, unsigned int *nbytes,
- struct kvec *iov, const int nvec, const int long_op);
-extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, __u64 *inode_number,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
const __u16 netfid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
const bool waitFlag, const __u8 oplock_level);
-extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const int get_flag,
const __u64 len, struct file_lock *,
const __u16 lock_type, const bool waitFlag);
-extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
extern int CIFSSMBEcho(struct TCP_Server_Info *server);
-extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
+extern int CIFSSMBLogoff(const int xid, struct cifs_ses *ses);
-extern struct cifsSesInfo *sesInfoAlloc(void);
-extern void sesInfoFree(struct cifsSesInfo *);
-extern struct cifsTconInfo *tconInfoAlloc(void);
-extern void tconInfoFree(struct cifsTconInfo *);
+extern struct cifs_ses *sesInfoAlloc(void);
+extern void sesInfoFree(struct cifs_ses *);
+extern struct cifs_tcon *tconInfoAlloc(void);
+extern void tconInfoFree(struct cifs_tcon *);
extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
@@ -379,51 +384,51 @@ extern int cifs_verify_signature(struct smb_hdr *,
struct TCP_Server_Info *server,
__u32 expected_sequence_number);
extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
-extern int setup_ntlm_response(struct cifsSesInfo *);
-extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *);
+extern int setup_ntlm_response(struct cifs_ses *);
+extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
-extern int calc_seckey(struct cifsSesInfo *);
+extern int calc_seckey(struct cifs_ses *);
#ifdef CONFIG_CIFS_WEAK_PW_HASH
extern int calc_lanman_hash(const char *password, const char *cryptkey,
bool encrypt, char *lnm_session_key);
#endif /* CIFS_WEAK_PW_HASH */
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
-extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
const int notify_subdirs, const __u16 netfid,
__u32 filter, struct file *file, int multishot,
const struct nls_table *nls_codepage);
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
extern int CIFSSMBCopy(int xid,
- struct cifsTconInfo *source_tcon,
+ struct cifs_tcon *source_tcon,
const char *fromName,
const __u16 target_tid,
const char *toName, const int flags,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
const unsigned char *ea_name, char *EAData,
size_t bufsize, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
const char *fileName, const char *ea_name,
const void *ea_value, const __u16 ea_value_len,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
-extern int CIFSSMBSetCIFSACL(const int, struct cifsTconInfo *, __u16,
+extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
struct cifs_ntsd *, __u32);
-extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
@@ -434,4 +439,22 @@ extern int mdfour(unsigned char *, unsigned char *, int);
extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
unsigned char *p24);
+
+/* asynchronous write support */
+struct cifs_writedata {
+ struct kref refcount;
+ enum writeback_sync_modes sync_mode;
+ struct work_struct work;
+ struct cifsFileInfo *cfile;
+ __u64 offset;
+ unsigned int bytes;
+ int result;
+ unsigned int nr_pages;
+ struct page *pages[1];
+};
+
+int cifs_async_writev(struct cifs_writedata *wdata);
+struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages);
+void cifs_writedata_release(struct kref *refcount);
+
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 83df937b814e..1a9fe7f816d1 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -32,6 +32,7 @@
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include "cifspdu.h"
#include "cifsglob.h"
@@ -84,7 +85,7 @@ static struct {
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
-static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
+static void mark_open_files_invalid(struct cifs_tcon *pTcon)
{
struct cifsFileInfo *open_file = NULL;
struct list_head *tmp;
@@ -104,10 +105,10 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
/* reconnect the socket, tcon, and smb session if needed */
static int
-cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
+cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
{
int rc = 0;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
struct TCP_Server_Info *server;
struct nls_table *nls_codepage;
@@ -226,7 +227,7 @@ out:
SMB information in the SMB header. If the return code is zero, this
function must have filled in request_buf pointer */
static int
-small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf)
{
int rc;
@@ -252,7 +253,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
int
small_smb_init_no_tc(const int smb_command, const int wct,
- struct cifsSesInfo *ses, void **request_buf)
+ struct cifs_ses *ses, void **request_buf)
{
int rc;
struct smb_hdr *buffer;
@@ -278,7 +279,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
/* If the return code is zero, this function must fill in request_buf pointer */
static int
-__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
*request_buf = cifs_buf_get();
@@ -304,7 +305,7 @@ __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* If the return code is zero, this function must fill in request_buf pointer */
static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
int rc;
@@ -317,7 +318,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
}
static int
-smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
if (tcon->ses->need_reconnect || tcon->need_reconnect)
@@ -366,7 +367,7 @@ static inline void inc_rfc1001_len(void *pSMB, int count)
}
int
-CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
+CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
{
NEGOTIATE_REQ *pSMB;
NEGOTIATE_RSP *pSMBr;
@@ -450,7 +451,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
- server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
+ server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
@@ -504,7 +505,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
memcpy(ses->server->cryptkey, rsp->EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
- } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+ } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
rc = -EIO; /* need cryptkey unless plain text */
goto neg_err_exit;
}
@@ -526,11 +527,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
goto neg_err_exit;
}
/* else wct == 17 NTLM */
- server->secMode = pSMBr->SecurityMode;
- if ((server->secMode & SECMODE_USER) == 0)
+ server->sec_mode = pSMBr->SecurityMode;
+ if ((server->sec_mode & SECMODE_USER) == 0)
cFYI(1, "share mode security");
- if ((server->secMode & SECMODE_PW_ENCRYPT) == 0)
+ if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
#endif /* CIFS_WEAK_PW_HASH */
@@ -570,18 +571,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
- } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
- && (pSMBr->EncryptionKeyLength == 0)) {
+ } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
+ server->capabilities & CAP_EXTENDED_SECURITY) &&
+ (pSMBr->EncryptionKeyLength == 0)) {
/* decode security blob */
- } else if (server->secMode & SECMODE_PW_ENCRYPT) {
- rc = -EIO; /* no crypt key only if plain text pwd */
- goto neg_err_exit;
- }
-
- /* BB might be helpful to save off the domain of server here */
-
- if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
- (server->capabilities & CAP_EXTENDED_SECURITY)) {
count = get_bcc(&pSMBr->hdr);
if (count < 16) {
rc = -EIO;
@@ -624,6 +617,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
} else
rc = -EOPNOTSUPP;
}
+ } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
+ rc = -EIO; /* no crypt key only if plain text pwd */
+ goto neg_err_exit;
} else
server->capabilities &= ~CAP_EXTENDED_SECURITY;
@@ -634,27 +630,27 @@ signing_check:
/* MUST_SIGN already includes the MAY_SIGN FLAG
so if this is zero it means that signing is disabled */
cFYI(1, "Signing disabled");
- if (server->secMode & SECMODE_SIGN_REQUIRED) {
+ if (server->sec_mode & SECMODE_SIGN_REQUIRED) {
cERROR(1, "Server requires "
"packet signing to be enabled in "
"/proc/fs/cifs/SecurityFlags.");
rc = -EOPNOTSUPP;
}
- server->secMode &=
+ server->sec_mode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
} else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
/* signing required */
cFYI(1, "Must sign - secFlags 0x%x", secFlags);
- if ((server->secMode &
+ if ((server->sec_mode &
(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
cERROR(1, "signing required but server lacks support");
rc = -EOPNOTSUPP;
} else
- server->secMode |= SECMODE_SIGN_REQUIRED;
+ server->sec_mode |= SECMODE_SIGN_REQUIRED;
} else {
/* signing optional ie CIFSSEC_MAY_SIGN */
- if ((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
- server->secMode &=
+ if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0)
+ server->sec_mode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
}
@@ -666,7 +662,7 @@ neg_err_exit:
}
int
-CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
{
struct smb_hdr *smb_buffer;
int rc = 0;
@@ -725,6 +721,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
{
ECHO_REQ *smb;
int rc = 0;
+ struct kvec iov;
cFYI(1, "In echo request");
@@ -739,9 +736,10 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
put_bcc(1, &smb->hdr);
smb->Data[0] = 'a';
inc_rfc1001_len(smb, 3);
+ iov.iov_base = smb;
+ iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
- rc = cifs_call_async(server, (struct smb_hdr *)smb,
- cifs_echo_callback, server);
+ rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true);
if (rc)
cFYI(1, "Echo request failed: %d", rc);
@@ -751,7 +749,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
}
int
-CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
+CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
{
LOGOFF_ANDX_REQ *pSMB;
int rc = 0;
@@ -778,7 +776,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
pSMB->hdr.Mid = GetNextMid(ses->server);
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -798,7 +796,7 @@ session_already_dead:
}
int
-CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
__u16 type, const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -873,7 +871,7 @@ PsxDelete:
}
int
-CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
const struct nls_table *nls_codepage, int remap)
{
DELETE_FILE_REQ *pSMB = NULL;
@@ -918,7 +916,7 @@ DelFileRetry:
}
int
-CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
+CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName,
const struct nls_table *nls_codepage, int remap)
{
DELETE_DIRECTORY_REQ *pSMB = NULL;
@@ -961,7 +959,7 @@ RmDirRetry:
}
int
-CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
const char *name, const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
@@ -1004,7 +1002,7 @@ MkDirRetry:
}
int
-CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
+CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags,
__u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap)
@@ -1170,7 +1168,7 @@ access_flags_to_smbopen_mode(const int access_flags)
}
int
-SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
+SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1277,7 +1275,7 @@ OldOpenRetry:
}
int
-CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1379,8 +1377,7 @@ openRetry:
}
int
-CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
- const unsigned int count, const __u64 lseek, unsigned int *nbytes,
+CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
char **buf, int *pbuf_type)
{
int rc = -EACCES;
@@ -1390,13 +1387,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
int wct;
int resp_buf_type = 0;
struct kvec iov[1];
+ __u32 pid = io_parms->pid;
+ __u16 netfid = io_parms->netfid;
+ __u64 offset = io_parms->offset;
+ struct cifs_tcon *tcon = io_parms->tcon;
+ unsigned int count = io_parms->length;
cFYI(1, "Reading %d bytes on fid %d", count, netfid);
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else {
wct = 10; /* old style read */
- if ((lseek >> 32) > 0) {
+ if ((offset >> 32) > 0) {
/* can not handle this big offset for old */
return -EIO;
}
@@ -1407,15 +1409,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
if (rc)
return rc;
+ pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+ pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+
/* tcon and ses pointer are checked in smb_init */
if (tcon->ses->server == NULL)
return -ECONNABORTED;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
- pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
+ pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
if (wct == 12)
- pSMB->OffsetHigh = cpu_to_le32(lseek >> 32);
+ pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
pSMB->Remaining = 0;
pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
@@ -1484,9 +1489,8 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
int
-CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
- const int netfid, const unsigned int count,
- const __u64 offset, unsigned int *nbytes, const char *buf,
+CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
+ unsigned int *nbytes, const char *buf,
const char __user *ubuf, const int long_op)
{
int rc = -EACCES;
@@ -1495,6 +1499,11 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
int bytes_returned, wct;
__u32 bytes_sent;
__u16 byte_count;
+ __u32 pid = io_parms->pid;
+ __u16 netfid = io_parms->netfid;
+ __u64 offset = io_parms->offset;
+ struct cifs_tcon *tcon = io_parms->tcon;
+ unsigned int count = io_parms->length;
*nbytes = 0;
@@ -1516,6 +1525,10 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
(void **) &pSMBr);
if (rc)
return rc;
+
+ pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+ pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+
/* tcon and ses pointer are checked in smb_init */
if (tcon->ses->server == NULL)
return -ECONNABORTED;
@@ -1602,17 +1615,259 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
return rc;
}
+void
+cifs_writedata_release(struct kref *refcount)
+{
+ struct cifs_writedata *wdata = container_of(refcount,
+ struct cifs_writedata, refcount);
+
+ if (wdata->cfile)
+ cifsFileInfo_put(wdata->cfile);
+
+ kfree(wdata);
+}
+
+/*
+ * Write failed with a retryable error. Resend the write request. It's also
+ * possible that the pages were redirtied, so re-clean them before resending.
+ */
+static void
+cifs_writev_requeue(struct cifs_writedata *wdata)
+{
+ int i, rc;
+ struct inode *inode = wdata->cfile->dentry->d_inode;
+
+ for (i = 0; i < wdata->nr_pages; i++) {
+ lock_page(wdata->pages[i]);
+ clear_page_dirty_for_io(wdata->pages[i]);
+ }
+
+ do {
+ rc = cifs_async_writev(wdata);
+ } while (rc == -EAGAIN);
+
+ for (i = 0; i < wdata->nr_pages; i++) {
+ if (rc != 0)
+ SetPageError(wdata->pages[i]);
+ unlock_page(wdata->pages[i]);
+ }
+
+ mapping_set_error(inode->i_mapping, rc);
+ kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+static void
+cifs_writev_complete(struct work_struct *work)
+{
+ struct cifs_writedata *wdata = container_of(work,
+ struct cifs_writedata, work);
+ struct inode *inode = wdata->cfile->dentry->d_inode;
+ int i = 0;
+
+ if (wdata->result == 0) {
+ cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
+ cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
+ wdata->bytes);
+ } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
+ return cifs_writev_requeue(wdata);
+
+ for (i = 0; i < wdata->nr_pages; i++) {
+ struct page *page = wdata->pages[i];
+ if (wdata->result == -EAGAIN)
+ __set_page_dirty_nobuffers(page);
+ else if (wdata->result < 0)
+ SetPageError(page);
+ end_page_writeback(page);
+ page_cache_release(page);
+ }
+ if (wdata->result != -EAGAIN)
+ mapping_set_error(inode->i_mapping, wdata->result);
+ kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+struct cifs_writedata *
+cifs_writedata_alloc(unsigned int nr_pages)
+{
+ struct cifs_writedata *wdata;
+
+ /* this would overflow */
+ if (nr_pages == 0) {
+ cERROR(1, "%s: called with nr_pages == 0!", __func__);
+ return NULL;
+ }
+
+ /* writedata + number of page pointers */
+ wdata = kzalloc(sizeof(*wdata) +
+ sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
+ if (wdata != NULL) {
+ INIT_WORK(&wdata->work, cifs_writev_complete);
+ kref_init(&wdata->refcount);
+ }
+ return wdata;
+}
+
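cifs_writedata_alloc() above sizes a single allocation to hold the header plus a trailing array of nr_pages page pointers, which is why nr_pages == 0 is rejected (nr_pages - 1 would wrap). A minimal userspace analogue of that allocation pattern, with made-up type names:

/* Illustrative sketch, not kernel code: one allocation carrying a struct
 * header followed by nr_pages trailing pointers. */
#include <stdlib.h>

struct demo_writedata {
	unsigned int nr_pages;
	void *pages[1];        /* declared with one entry, over-allocated below */
};

static struct demo_writedata *demo_alloc(unsigned int nr_pages)
{
	struct demo_writedata *wdata;

	if (nr_pages == 0)     /* nr_pages - 1 below would wrap around */
		return NULL;

	wdata = calloc(1, sizeof(*wdata) + sizeof(void *) * (nr_pages - 1));
	if (wdata)
		wdata->nr_pages = nr_pages;
	return wdata;
}

int main(void)
{
	struct demo_writedata *wdata = demo_alloc(4);

	free(wdata);
	return 0;
}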
+/*
+ * Check the midState and signature on received buffer (if any), and queue the
+ * workqueue completion task.
+ */
+static void
+cifs_writev_callback(struct mid_q_entry *mid)
+{
+ struct cifs_writedata *wdata = mid->callback_data;
+ struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ unsigned int written;
+ WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
+
+ switch (mid->midState) {
+ case MID_RESPONSE_RECEIVED:
+ wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
+ if (wdata->result != 0)
+ break;
+
+ written = le16_to_cpu(smb->CountHigh);
+ written <<= 16;
+ written += le16_to_cpu(smb->Count);
+ /*
+ * Mask off high 16 bits when bytes written as returned
+ * by the server is greater than bytes requested by the
+ * client. OS/2 servers are known to set incorrect
+ * CountHigh values.
+ */
+ if (written > wdata->bytes)
+ written &= 0xFFFF;
+
+ if (written < wdata->bytes)
+ wdata->result = -ENOSPC;
+ else
+ wdata->bytes = written;
+ break;
+ case MID_REQUEST_SUBMITTED:
+ case MID_RETRY_NEEDED:
+ wdata->result = -EAGAIN;
+ break;
+ default:
+ wdata->result = -EIO;
+ break;
+ }
+
+ queue_work(system_nrt_wq, &wdata->work);
+ DeleteMidQEntry(mid);
+ atomic_dec(&tcon->ses->server->inFlight);
+ wake_up(&tcon->ses->server->request_q);
+}
+
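The MID_RESPONSE_RECEIVED branch above reassembles the number of bytes written from the 16-bit Count/CountHigh pair and masks off bogus high bits reported by broken (e.g. OS/2) servers. A small self-contained sketch of that logic, with invented values:

/* Illustrative only: Count/CountHigh reassembly and the OS/2 workaround. */
#include <stdint.h>
#include <assert.h>

static unsigned int demo_written(uint16_t count_high, uint16_t count,
				 unsigned int requested)
{
	unsigned int written = ((unsigned int)count_high << 16) + count;

	/* buggy servers may report more than was requested; keep low 16 bits */
	if (written > requested)
		written &= 0xFFFF;
	return written;
}

int main(void)
{
	assert(demo_written(0x0001, 0x0000, 4096) == 0);    /* bogus CountHigh masked */
	assert(demo_written(0x0000, 2048, 4096) == 2048);   /* sane short write */
	return 0;
}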
+/* cifs_async_writev - send an async write, and set up mid to handle result */
+int
+cifs_async_writev(struct cifs_writedata *wdata)
+{
+ int i, rc = -EACCES;
+ WRITE_REQ *smb = NULL;
+ int wct;
+ struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct inode *inode = wdata->cfile->dentry->d_inode;
+ struct kvec *iov = NULL;
+
+ if (tcon->ses->capabilities & CAP_LARGE_FILES) {
+ wct = 14;
+ } else {
+ wct = 12;
+ if (wdata->offset >> 32 > 0) {
+ /* can not handle big offset for old srv */
+ return -EIO;
+ }
+ }
+
+ rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
+ if (rc)
+ goto async_writev_out;
+
+ /* 1 iov per page + 1 for header */
+ iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS);
+ if (iov == NULL) {
+ rc = -ENOMEM;
+ goto async_writev_out;
+ }
+
+ smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
+ smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));
+
+ smb->AndXCommand = 0xFF; /* none */
+ smb->Fid = wdata->cfile->netfid;
+ smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
+ if (wct == 14)
+ smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
+ smb->Reserved = 0xFFFFFFFF;
+ smb->WriteMode = 0;
+ smb->Remaining = 0;
+
+ smb->DataOffset =
+ cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
+
+ /* 4 for RFC1001 length + 1 for BCC */
+ iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
+ iov[0].iov_base = smb;
+
+ /* marshal up the pages into iov array */
+ wdata->bytes = 0;
+ for (i = 0; i < wdata->nr_pages; i++) {
+ iov[i + 1].iov_len = min(inode->i_size -
+ page_offset(wdata->pages[i]),
+ (loff_t)PAGE_CACHE_SIZE);
+ iov[i + 1].iov_base = kmap(wdata->pages[i]);
+ wdata->bytes += iov[i + 1].iov_len;
+ }
+
+ cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
+
+ smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
+ smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);
+
+ if (wct == 14) {
+ inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
+ put_bcc(wdata->bytes + 1, &smb->hdr);
+ } else {
+ /* wct == 12 */
+ struct smb_com_writex_req *smbw =
+ (struct smb_com_writex_req *)smb;
+ inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
+ put_bcc(wdata->bytes + 5, &smbw->hdr);
+ iov[0].iov_len += 4; /* pad bigger by four bytes */
+ }
+
+ kref_get(&wdata->refcount);
+ rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
+ cifs_writev_callback, wdata, false);
+
+ if (rc == 0)
+ cifs_stats_inc(&tcon->num_writes);
+ else
+ kref_put(&wdata->refcount, cifs_writedata_release);
+
+ /* send is done, unmap pages */
+ for (i = 0; i < wdata->nr_pages; i++)
+ kunmap(wdata->pages[i]);
+
+async_writev_out:
+ cifs_small_buf_release(smb);
+ kfree(iov);
+ return rc;
+}
+
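The marshalling loop in cifs_async_writev() above gives every page a full PAGE_CACHE_SIZE slot in the iov array except the last one, which is clipped to i_size. A userspace sketch of the same length computation, assuming 4 KiB pages and an example 10000-byte file:

/* Illustrative only: per-page iov length computation from the loop above. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t i_size = 10000;          /* example file size */
	unsigned int nr_pages = 3;        /* pages 0..2 cover offsets 0..12287 */
	uint64_t total = 0;

	for (unsigned int i = 0; i < nr_pages; i++) {
		uint64_t page_off = (uint64_t)i * DEMO_PAGE_SIZE;
		uint64_t len = i_size - page_off;

		if (len > DEMO_PAGE_SIZE)
			len = DEMO_PAGE_SIZE;     /* full page */
		total += len;                     /* last page: 1808 bytes */
		printf("page %u: %llu bytes\n", i, (unsigned long long)len);
	}
	printf("wdata->bytes = %llu\n", (unsigned long long)total);
	return 0;
}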
int
-CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
- const int netfid, const unsigned int count,
- const __u64 offset, unsigned int *nbytes, struct kvec *iov,
- int n_vec, const int long_op)
+CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
+ unsigned int *nbytes, struct kvec *iov, int n_vec,
+ const int long_op)
{
int rc = -EACCES;
WRITE_REQ *pSMB = NULL;
int wct;
int smb_hdr_len;
int resp_buf_type = 0;
+ __u32 pid = io_parms->pid;
+ __u16 netfid = io_parms->netfid;
+ __u64 offset = io_parms->offset;
+ struct cifs_tcon *tcon = io_parms->tcon;
+ unsigned int count = io_parms->length;
*nbytes = 0;
@@ -1630,6 +1885,10 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB);
if (rc)
return rc;
+
+ pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+ pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+
/* tcon and ses pointer are checked in smb_init */
if (tcon->ses->server == NULL)
return -ECONNABORTED;
@@ -1705,7 +1964,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
int
-CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
@@ -1775,7 +2034,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const int get_flag, const __u64 len,
struct file_lock *pLockData, const __u16 lock_type,
const bool waitFlag)
@@ -1913,7 +2172,7 @@ plk_err_exit:
int
-CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
CLOSE_REQ *pSMB = NULL;
@@ -1946,7 +2205,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
}
int
-CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
FLUSH_REQ *pSMB = NULL;
@@ -1967,7 +2226,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
}
int
-CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2034,7 +2293,7 @@ renameRetry:
return rc;
}
-int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
int netfid, const char *target_name,
const struct nls_table *nls_codepage, int remap)
{
@@ -2114,7 +2373,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
}
int
-CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName,
+CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName,
const __u16 target_tid, const char *toName, const int flags,
const struct nls_table *nls_codepage, int remap)
{
@@ -2182,7 +2441,7 @@ copyRetry:
}
int
-CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage)
{
@@ -2271,7 +2530,7 @@ createSymLinkRetry:
}
int
-CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2356,7 +2615,7 @@ createHardLinkRetry:
}
int
-CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2428,7 +2687,7 @@ winCreateHardLinkRetry:
}
int
-CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, char **symlinkinfo,
const struct nls_table *nls_codepage)
{
@@ -2533,7 +2792,7 @@ querySymLinkRetry:
* it is not compiled in by default until callers fixed up and more tested.
*/
int
-CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage)
@@ -2771,7 +3030,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
}
int
-CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap)
@@ -2859,7 +3118,7 @@ queryAclRetry:
}
int
-CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen,
const int acl_type,
@@ -2939,7 +3198,7 @@ setACLerrorExit:
/* BB fix tabs in this function FIXME BB */
int
-CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
+CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
{
int rc = 0;
@@ -3032,7 +3291,7 @@ GetExtAttrOut:
*/
static int
smb_init_nttransact(const __u16 sub_command, const int setup_count,
- const int parm_len, struct cifsTconInfo *tcon,
+ const int parm_len, struct cifs_tcon *tcon,
void **ret_buf)
{
int rc;
@@ -3115,7 +3374,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
/* Get Security Descriptor (by handle) from remote server for a file or dir */
int
-CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
+CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd **acl_inf, __u32 *pbuflen)
{
int rc = 0;
@@ -3207,7 +3466,7 @@ qsec_out:
}
int
-CIFSSMBSetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
+CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd *pntsd, __u32 acllen)
{
__u16 byte_count, param_count, data_count, param_offset, data_offset;
@@ -3273,7 +3532,7 @@ setCifsAclRetry:
/* Legacy Query Path Information call for lookup to old servers such
as Win9x/WinME */
-int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
+int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *pFinfo,
const struct nls_table *nls_codepage, int remap)
@@ -3341,7 +3600,7 @@ QInfRetry:
}
int
-CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
@@ -3408,7 +3667,7 @@ QFileInfoRetry:
}
int
-CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *pFindData,
int legacy /* old style infolevel */,
@@ -3509,7 +3768,7 @@ QPathInfoRetry:
}
int
-CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
@@ -3578,7 +3837,7 @@ UnixQFileInfoRetry:
}
int
-CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap)
@@ -3664,7 +3923,7 @@ UnixQPathInfoRetry:
/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
-CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName,
const struct nls_table *nls_codepage,
__u16 *pnetfid,
@@ -3812,7 +4071,7 @@ findFirstRetry:
return rc;
}
-int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
__u16 searchHandle, struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
@@ -3950,7 +4209,7 @@ FNext2_err_exit:
}
int
-CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
+CIFSFindClose(const int xid, struct cifs_tcon *tcon,
const __u16 searchHandle)
{
int rc = 0;
@@ -3982,7 +4241,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
__u64 *inode_number,
const struct nls_table *nls_codepage, int remap)
@@ -4184,7 +4443,7 @@ parse_DFS_referrals_exit:
}
int
-CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
const unsigned char *searchName,
struct dfs_info3_param **target_nodes,
unsigned int *num_of_nodes,
@@ -4233,7 +4492,7 @@ getDFSRetry:
}
if (ses->server) {
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
@@ -4298,7 +4557,7 @@ GetDFSRefExit:
/* Query File System Info such as free space to old servers such as Win 9x */
int
-SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
+SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
{
/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4377,7 +4636,7 @@ oldQFSInfoRetry:
}
int
-CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
+CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
{
/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4456,7 +4715,7 @@ QFSInfoRetry:
}
int
-CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4526,7 +4785,7 @@ QFSAttributeRetry:
}
int
-CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4597,7 +4856,7 @@ QFSDeviceRetry:
}
int
-CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4667,7 +4926,7 @@ QFSUnixRetry:
}
int
-CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
+CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap)
{
/* level 0x200 SMB_SET_CIFS_UNIX_INFO */
TRANSACTION2_SETFSI_REQ *pSMB = NULL;
@@ -4741,7 +5000,7 @@ SETFSUnixRetry:
int
-CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData)
{
/* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */
@@ -4834,7 +5093,7 @@ QFSPosixRetry:
in Samba which this routine can run into */
int
-CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName,
__u64 size, bool SetAllocation,
const struct nls_table *nls_codepage, int remap)
{
@@ -4923,7 +5182,7 @@ SetEOFRetry:
}
int
-CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
+CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
__u16 fid, __u32 pid_of_opener, bool SetAllocation)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5005,7 +5264,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
time and resort to the original setpathinfo level which takes the ancient
DOS time format with 2 second granularity */
int
-CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5067,7 +5326,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
bool delete_file, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5123,7 +5382,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage, int remap)
{
@@ -5207,7 +5466,7 @@ SetTimesRetry:
handling it anyway and NT4 was what we thought it would be needed for
Do not delete it until we prove whether needed for Win9x though */
int
-CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
+CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName,
__u16 dos_attrs, const struct nls_table *nls_codepage)
{
SETATTR_REQ *pSMB = NULL;
@@ -5295,7 +5554,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
}
int
-CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
const struct cifs_unix_set_info_args *args,
u16 fid, u32 pid_of_opener)
{
@@ -5358,7 +5617,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
+CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName,
const struct cifs_unix_set_info_args *args,
const struct nls_table *nls_codepage, int remap)
{
@@ -5445,7 +5704,7 @@ setPermsRetry:
* the data isn't copied to it, but the length is returned.
*/
ssize_t
-CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, const unsigned char *ea_name,
char *EAData, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
@@ -5626,7 +5885,7 @@ QAllEAsOut:
}
int
-CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName,
const char *ea_name, const void *ea_value,
const __u16 ea_value_len, const struct nls_table *nls_codepage,
int remap)
@@ -5753,7 +6012,7 @@ SetEARetry:
* incompatible for network fs clients, we could instead simply
* expose this config flag by adding a future cifs (and smb2) notify ioctl.
*/
-int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
+int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
const int notify_subdirs, const __u16 netfid,
__u32 filter, struct file *pfile, int multishot,
const struct nls_table *nls_codepage)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index da284e3cb653..6d88b82537c3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -57,62 +57,6 @@
extern mempool_t *cifs_req_poolp;
-struct smb_vol {
- char *username;
- char *password;
- char *domainname;
- char *UNC;
- char *UNCip;
- char *iocharset; /* local code page for mapping to and from Unicode */
- char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
- char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
- uid_t cred_uid;
- uid_t linux_uid;
- gid_t linux_gid;
- mode_t file_mode;
- mode_t dir_mode;
- unsigned secFlg;
- bool retry:1;
- bool intr:1;
- bool setuids:1;
- bool override_uid:1;
- bool override_gid:1;
- bool dynperm:1;
- bool noperm:1;
- bool no_psx_acl:1; /* set if posix acl support should be disabled */
- bool cifs_acl:1;
- bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
- bool server_ino:1; /* use inode numbers from server ie UniqueId */
- bool direct_io:1;
- bool strict_io:1; /* strict cache behavior */
- bool remap:1; /* set to remap seven reserved chars in filenames */
- bool posix_paths:1; /* unset to not ask for posix pathnames. */
- bool no_linux_ext:1;
- bool sfu_emul:1;
- bool nullauth:1; /* attempt to authenticate with null user */
- bool nocase:1; /* request case insensitive filenames */
- bool nobrl:1; /* disable sending byte range locks to srv */
- bool mand_lock:1; /* send mandatory not posix byte range lock reqs */
- bool seal:1; /* request transport encryption on share */
- bool nodfs:1; /* Do not request DFS, even if available */
- bool local_lease:1; /* check leases only on local system, not remote */
- bool noblocksnd:1;
- bool noautotune:1;
- bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
- bool fsc:1; /* enable fscache */
- bool mfsymlinks:1; /* use Minshall+French Symlinks */
- bool multiuser:1;
- bool use_smb2:1; /* force smb2 use on mount instead of cifs */
- unsigned int rsize;
- unsigned int wsize;
- bool sockopt_tcp_nodelay:1;
- unsigned short int port;
- unsigned long actimeo; /* attribute cache timeout (jiffies) */
- char *prepath;
- struct sockaddr_storage srcaddr; /* allow binding to a local IP */
- struct nls_table *local_nls;
-};
-
/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE (1 * HZ)
#define TLINK_IDLE_EXPIRE (600 * HZ)
@@ -135,9 +79,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
{
int rc = 0;
struct list_head *tmp, *tmp2;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
struct mid_q_entry *mid_entry;
+ struct list_head retry_list;
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus == CifsExiting) {
@@ -157,11 +102,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
ses->need_reconnect = true;
ses->ipc_tid = 0;
list_for_each(tmp2, &ses->tcon_list) {
- tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list);
+ tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
tcon->need_reconnect = true;
}
}
@@ -189,16 +134,23 @@ cifs_reconnect(struct TCP_Server_Info *server)
mutex_unlock(&server->srv_mutex);
/* mark submitted MIDs for retry and issue callback */
- cFYI(1, "%s: issuing mid callbacks", __func__);
+ INIT_LIST_HEAD(&retry_list);
+ cFYI(1, "%s: moving mids to private list", __func__);
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if (mid_entry->midState == MID_REQUEST_SUBMITTED)
mid_entry->midState = MID_RETRY_NEEDED;
+ list_move(&mid_entry->qhead, &retry_list);
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ cFYI(1, "%s: issuing mid callbacks", __func__);
+ list_for_each_safe(tmp, tmp2, &retry_list) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
}
- spin_unlock(&GlobalMid_Lock);
while (server->tcpStatus == CifsNeedReconnect) {
try_to_freeze();
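cifs_reconnect() now moves submitted mids to a private retry_list while holding GlobalMid_Lock and only then issues the callbacks, so no callback ever runs under the lock. A compact userspace analogue of that detach-then-walk pattern (a pthread mutex stands in for the spinlock; all names are invented):

/* Illustrative only: detach a shared list under the lock, run callbacks
 * without it. */
#include <pthread.h>
#include <stdio.h>

struct demo_mid {
	struct demo_mid *next;
	void (*callback)(struct demo_mid *);
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_mid *pending;

static void demo_retry(struct demo_mid *mid)
{
	printf("retrying mid %p\n", (void *)mid);
}

static void demo_flush_pending(void)
{
	struct demo_mid *retry, *mid;

	pthread_mutex_lock(&demo_lock);
	retry = pending;               /* detach the whole pending list */
	pending = NULL;
	pthread_mutex_unlock(&demo_lock);

	while ((mid = retry) != NULL) {  /* callbacks run without the lock held */
		retry = mid->next;
		mid->callback(mid);
	}
}

int main(void)
{
	struct demo_mid a = { NULL, demo_retry }, b = { &a, demo_retry };

	pending = &b;
	demo_flush_pending();
	return 0;
}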
@@ -672,12 +624,12 @@ multi_t2_fnd:
mid_entry->when_received = jiffies;
#endif
list_del_init(&mid_entry->qhead);
- mid_entry->callback(mid_entry);
break;
}
spin_unlock(&GlobalMid_Lock);
if (mid_entry != NULL) {
+ mid_entry->callback(mid_entry);
/* Was previous buf put in mpx struct for multi-rsp? */
if (!isMultiRsp) {
/* smb buffer will be freed by user thread */
@@ -741,15 +693,25 @@ multi_t2_fnd:
cifs_small_buf_release(smallbuf);
if (!list_empty(&server->pending_mid_q)) {
+ struct list_head dispose_list;
+
+ INIT_LIST_HEAD(&dispose_list);
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cFYI(1, "Clearing Mid 0x%x - issuing callback",
- mid_entry->mid);
+ cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
+ mid_entry->midState = MID_SHUTDOWN;
+ list_move(&mid_entry->qhead, &dispose_list);
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ /* now walk dispose list and issue callbacks */
+ list_for_each_safe(tmp, tmp2, &dispose_list) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ cFYI(1, "Callback mid 0x%x", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
}
- spin_unlock(&GlobalMid_Lock);
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
}
@@ -1062,13 +1024,6 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
(strnicmp(value, "1", 1) == 0)) {
/* this is the default */
continue;
- } else if ((strnicmp(value, "smb2", 4) == 0) ||
- (strnicmp(value, "2", 1) == 0)) {
-#ifdef CONFIG_CIFS_SMB2
- vol->use_smb2 = true;
-#else
- cERROR(1, "smb2 support not enabled");
-#endif /* CONFIG_CIFS_SMB2 */
}
} else if ((strnicmp(data, "unc", 3) == 0)
|| (strnicmp(data, "target", 6) == 0)
@@ -1404,6 +1359,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->server_ino = 1;
} else if (strnicmp(data, "noserverino", 9) == 0) {
vol->server_ino = 0;
+ } else if (strnicmp(data, "rwpidforward", 4) == 0) {
+ vol->rwpidforward = 1;
} else if (strnicmp(data, "cifsacl", 7) == 0) {
vol->cifs_acl = 1;
} else if (strnicmp(data, "nocifsacl", 9) == 0) {
@@ -1640,16 +1597,35 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
/* now check if signing mode is acceptable */
if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
- (server->secMode & SECMODE_SIGN_REQUIRED))
+ (server->sec_mode & SECMODE_SIGN_REQUIRED))
return false;
else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
- (server->secMode &
+ (server->sec_mode &
(SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
return false;
return true;
}
+static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
+ struct smb_vol *vol)
+{
+ if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
+ return 0;
+
+ if (!match_address(server, addr,
+ (struct sockaddr *)&vol->srcaddr))
+ return 0;
+
+ if (!match_port(server, addr))
+ return 0;
+
+ if (!match_security(server, vol))
+ return 0;
+
+ return 1;
+}
+
static struct TCP_Server_Info *
cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
{
@@ -1657,17 +1633,7 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
- if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
- continue;
-
- if (!match_address(server, addr,
- (struct sockaddr *)&vol->srcaddr))
- continue;
-
- if (!match_port(server, addr))
- continue;
-
- if (!match_security(server, vol))
+ if (!match_server(server, addr, vol))
continue;
++server->srv_count;
@@ -1861,32 +1827,39 @@ out_err:
return ERR_PTR(rc);
}
-static struct cifsSesInfo *
+static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
+{
+ switch (ses->server->secType) {
+ case Kerberos:
+ if (vol->cred_uid != ses->cred_uid)
+ return 0;
+ break;
+ default:
+ /* anything else takes username/password */
+ if (ses->user_name == NULL)
+ return 0;
+ if (strncmp(ses->user_name, vol->username,
+ MAX_USERNAME_SIZE))
+ return 0;
+ if (strlen(vol->username) != 0 &&
+ ses->password != NULL &&
+ strncmp(ses->password,
+ vol->password ? vol->password : "",
+ MAX_PASSWORD_SIZE))
+ return 0;
+ }
+ return 1;
+}
+
+static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- switch (server->secType) {
- case Kerberos:
- if (vol->cred_uid != ses->cred_uid)
- continue;
- break;
- default:
- /* anything else takes username/password */
- if (ses->user_name == NULL)
- continue;
- if (strncmp(ses->user_name, vol->username,
- MAX_USERNAME_SIZE))
- continue;
- if (strlen(vol->username) != 0 &&
- ses->password != NULL &&
- strncmp(ses->password,
- vol->password ? vol->password : "",
- MAX_PASSWORD_SIZE))
- continue;
- }
+ if (!match_session(ses, vol))
+ continue;
++ses->ses_count;
spin_unlock(&cifs_tcp_ses_lock);
return ses;
@@ -1896,7 +1869,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
}
static void
-cifs_put_smb_ses(struct cifsSesInfo *ses)
+cifs_put_smb_ses(struct cifs_ses *ses)
{
int xid;
struct TCP_Server_Info *server = ses->server;
@@ -1922,11 +1895,11 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
static bool warned_on_ntlm; /* globals init to false automatically */
-static struct cifsSesInfo *
+static struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
int rc = -ENOMEM, xid;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
@@ -2029,20 +2002,26 @@ get_ses_fail:
return ERR_PTR(rc);
}
-static struct cifsTconInfo *
-cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
+static int match_tcon(struct cifs_tcon *tcon, const char *unc)
+{
+ if (tcon->tidStatus == CifsExiting)
+ return 0;
+ if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
+ return 0;
+ return 1;
+}
+
+static struct cifs_tcon *
+cifs_find_tcon(struct cifs_ses *ses, const char *unc)
{
struct list_head *tmp;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &ses->tcon_list) {
- tcon = list_entry(tmp, struct cifsTconInfo, tcon_list);
- if (tcon->tidStatus == CifsExiting)
- continue;
- if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
+ tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
+ if (!match_tcon(tcon, unc))
continue;
-
++tcon->tc_count;
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
@@ -2052,10 +2031,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
}
static void
-cifs_put_tcon(struct cifsTconInfo *tcon)
+cifs_put_tcon(struct cifs_tcon *tcon)
{
int xid;
- struct cifsSesInfo *ses = tcon->ses;
+ struct cifs_ses *ses = tcon->ses;
cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
@@ -2076,11 +2055,11 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
cifs_put_smb_ses(ses);
}
-static struct cifsTconInfo *
-cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
+static struct cifs_tcon *
+cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
int rc, xid;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
tcon = cifs_find_tcon(ses, volume_info->UNC);
if (tcon) {
@@ -2169,8 +2148,102 @@ cifs_put_tlink(struct tcon_link *tlink)
return;
}
+static inline struct tcon_link *
+cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb);
+
+static int
+compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+ struct cifs_sb_info *old = CIFS_SB(sb);
+ struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+ if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
+ return 0;
+
+ if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
+ (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
+ return 0;
+
+ if (old->rsize != new->rsize)
+ return 0;
+
+ /*
+	 * We want to share the sb only if no wsize is specified, or the
+	 * specified wsize is greater than or equal to the existing one.
+ */
+ if (new->wsize && new->wsize < old->wsize)
+ return 0;
+
+ if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
+ return 0;
+
+ if (old->mnt_file_mode != new->mnt_file_mode ||
+ old->mnt_dir_mode != new->mnt_dir_mode)
+ return 0;
+
+ if (strcmp(old->local_nls->charset, new->local_nls->charset))
+ return 0;
+
+ if (old->actimeo != new->actimeo)
+ return 0;
+
+ return 1;
+}
+
int
-get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
+cifs_match_super(struct super_block *sb, void *data)
+{
+ struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
+ struct smb_vol *volume_info;
+ struct cifs_sb_info *cifs_sb;
+ struct TCP_Server_Info *tcp_srv;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
+ struct sockaddr_storage addr;
+ int rc = 0;
+
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+
+ spin_lock(&cifs_tcp_ses_lock);
+ cifs_sb = CIFS_SB(sb);
+ tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+ if (IS_ERR(tlink)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return rc;
+ }
+ tcon = tlink_tcon(tlink);
+ ses = tcon->ses;
+ tcp_srv = ses->server;
+
+ volume_info = mnt_data->vol;
+
+ if (!volume_info->UNCip || !volume_info->UNC)
+ goto out;
+
+ rc = cifs_fill_sockaddr((struct sockaddr *)&addr,
+ volume_info->UNCip,
+ strlen(volume_info->UNCip),
+ volume_info->port);
+ if (!rc)
+ goto out;
+
+ if (!match_server(tcp_srv, (struct sockaddr *)&addr, volume_info) ||
+ !match_session(ses, volume_info) ||
+ !match_tcon(tcon, volume_info->UNC)) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = compare_mount_options(sb, mnt_data);
+out:
+ cifs_put_tlink(tlink);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return rc;
+}
+
+int
+get_dfs_path(int xid, struct cifs_ses *pSesInfo, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
struct dfs_info3_param **preferrals, int remap)
{
@@ -2469,7 +2542,7 @@ ip_connect(struct TCP_Server_Info *server)
return generic_ip_connect(server);
}
-void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
struct super_block *sb, struct smb_vol *vol_info)
{
/* if we are reconnecting then should we check to see if
@@ -2498,7 +2571,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
-
+ cFYI(1, "unix caps which server supports %lld", cap);
/* check for reconnect case in which we do not
want to change the mount behavior if we can avoid it */
if (vol_info == NULL) {
@@ -2516,6 +2589,9 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
}
}
+ if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
+ cERROR(1, "per-share encryption not supported yet");
+
cap &= CIFS_UNIX_CAP_MASK;
if (vol_info && vol_info->no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
@@ -2534,12 +2610,6 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
CIFS_MOUNT_POSIX_PATHS;
}
- /* We might be setting the path sep back to a different
- form if we are reconnecting and the server switched its
- posix path capability for this share */
- if (sb && (CIFS_SB(sb)->prepathlen > 0))
- CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb));
-
if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
CIFS_SB(sb)->rsize = 127 * 1024;
@@ -2564,6 +2634,10 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
cFYI(1, "very large read cap");
if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
cFYI(1, "very large write cap");
+ if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
+ cFYI(1, "transport encryption cap");
+ if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
+ cFYI(1, "mandatory transport encryption cap");
#endif /* CIFS_DEBUG2 */
if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
if (vol_info == NULL) {
@@ -2580,28 +2654,8 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
}
}
-static void
-convert_delimiter(char *path, char delim)
-{
- int i;
- char old_delim;
-
- if (path == NULL)
- return;
-
- if (delim == '/')
- old_delim = '\\';
- else
- old_delim = '/';
-
- for (i = 0; path[i] != '\0'; i++) {
- if (path[i] == old_delim)
- path[i] = delim;
- }
-}
-
-static void setup_cifs_sb(struct smb_vol *pvolume_info,
- struct cifs_sb_info *cifs_sb)
+void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ struct cifs_sb_info *cifs_sb)
{
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
@@ -2615,40 +2669,19 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
else /* default */
cifs_sb->rsize = CIFSMaxBufSize;
- if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) {
- cERROR(1, "wsize %d too large, using 4096 instead",
- pvolume_info->wsize);
- cifs_sb->wsize = 4096;
- } else if (pvolume_info->wsize)
- cifs_sb->wsize = pvolume_info->wsize;
- else
- cifs_sb->wsize = min_t(const int,
- PAGEVEC_SIZE * PAGE_CACHE_SIZE,
- 127*1024);
- /* old default of CIFSMaxBufSize was too small now
- that SMB Write2 can send multiple pages in kvec.
- RFC1001 does not describe what happens when frame
- bigger than 128K is sent so use that as max in
- conjunction with 52K kvec constraint on arch with 4K
- page size */
-
if (cifs_sb->rsize < 2048) {
cifs_sb->rsize = 2048;
/* Windows ME may prefer this */
cFYI(1, "readsize set to minimum: 2048");
}
- /* calculate prepath */
- cifs_sb->prepath = pvolume_info->prepath;
- if (cifs_sb->prepath) {
- cifs_sb->prepathlen = strlen(cifs_sb->prepath);
- /* we can not convert the / to \ in the path
- separators in the prefixpath yet because we do not
- know (until reset_cifs_unix_caps is called later)
- whether POSIX PATH CAP is available. We normalize
- the / to \ after reset_cifs_unix_caps is called */
- pvolume_info->prepath = NULL;
- } else
- cifs_sb->prepathlen = 0;
+
+ /*
+ * Temporarily set wsize for matching superblock. If we end up using
+ * new sb then cifs_negotiate_wsize will later negotiate it downward
+ * if needed.
+ */
+ cifs_sb->wsize = pvolume_info->wsize;
+
cifs_sb->mnt_uid = pvolume_info->linux_uid;
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
@@ -2657,6 +2690,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo;
+ cifs_sb->local_nls = pvolume_info->local_nls;
if (pvolume_info->noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -2676,6 +2710,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
if (pvolume_info->mand_lock)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
+ if (pvolume_info->rwpidforward)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
if (pvolume_info->cifs_acl)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
if (pvolume_info->override_uid)
@@ -2709,8 +2745,55 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
"mount option supported");
}
+/*
+ * When the server supports very large writes via POSIX extensions, we can
+ * allow up to 2^24 - PAGE_CACHE_SIZE.
+ *
+ * Note that this might make for "interesting" allocation problems during
+ * writeback however (as we have to allocate an array of pointers for the
+ * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ */
+#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE)
+
+/*
+ * When the server doesn't allow large posix writes, default to a wsize of
+ * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size
+ * described in RFC1001. This allows space for the header without going over
+ * that by default.
+ */
+#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE)
+
+/*
+ * The default wsize is 1M. find_get_pages seems to return a maximum of 256
+ * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * a single wsize request with a single call.
+ */
+#define CIFS_DEFAULT_WSIZE (1024 * 1024)
+
+static unsigned int
+cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+{
+ __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
+ CIFS_DEFAULT_WSIZE;
+
+ /* can server support 24-bit write sizes? (via UNIX extensions) */
+ if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+ wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE);
+
+ /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */
+ if (!(server->capabilities & CAP_LARGE_WRITE_X))
+ wsize = min_t(unsigned int, wsize, USHRT_MAX);
+
+ /* hard limit of CIFS_MAX_WSIZE */
+ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
+
+ return wsize;
+}
+
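cifs_negotiate_wsize() above starts from the requested (or 1M default) wsize and clamps it by three ceilings in turn: the RFC1001-derived limit when large POSIX writes are unavailable, 16 bits when the server lacks CAP_LARGE_WRITE_X, and the 2^24 hard cap. A worked sketch of that ordering, assuming PAGE_CACHE_SIZE == 4096 (values and names invented for illustration):

/* Illustrative only: the clamping order used by cifs_negotiate_wsize(). */
#include <stdio.h>

#define DEMO_PAGE_SIZE         4096u
#define DEMO_DEFAULT_WSIZE     (1024u * 1024u)
#define DEMO_MAX_WSIZE         ((1u << 24) - DEMO_PAGE_SIZE)
#define DEMO_MAX_RFC1001_WSIZE (128u * 1024u - DEMO_PAGE_SIZE)

static unsigned int demo_wsize(unsigned int requested, int large_posix_writes,
			       int cap_large_write_x)
{
	unsigned int wsize = requested ? requested : DEMO_DEFAULT_WSIZE;

	if (!large_posix_writes && wsize > DEMO_MAX_RFC1001_WSIZE)
		wsize = DEMO_MAX_RFC1001_WSIZE;   /* no 24-bit POSIX writes */
	if (!cap_large_write_x && wsize > 65535)
		wsize = 65535;                    /* 16-bit write sizes only */
	if (wsize > DEMO_MAX_WSIZE)
		wsize = DEMO_MAX_WSIZE;           /* absolute ceiling */
	return wsize;
}

int main(void)
{
	printf("%u\n", demo_wsize(0, 1, 1));   /* 1048576: default, capable server */
	printf("%u\n", demo_wsize(0, 0, 1));   /* 126976: no large POSIX writes */
	printf("%u\n", demo_wsize(0, 0, 0));   /* 65535: no CAP_LARGE_WRITE_X either */
	return 0;
}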
static int
-is_path_accessible(int xid, struct cifsTconInfo *tcon,
+is_path_accessible(int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path)
{
int rc;
@@ -2733,8 +2816,8 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon,
return rc;
}
-static void
-cleanup_volume_info(struct smb_vol **pvolume_info)
+void
+cifs_cleanup_volume_info(struct smb_vol **pvolume_info)
{
struct smb_vol *volume_info;
@@ -2764,24 +2847,13 @@ build_unc_path_to_root(const struct smb_vol *volume_info,
char *full_path;
int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1);
- full_path = kmalloc(unc_len + cifs_sb->prepathlen + 1, GFP_KERNEL);
+ full_path = kmalloc(unc_len + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
strncpy(full_path, volume_info->UNC, unc_len);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
- int i;
- for (i = 0; i < unc_len; i++) {
- if (full_path[i] == '\\')
- full_path[i] = '/';
- }
- }
-
- if (cifs_sb->prepathlen)
- strncpy(full_path + unc_len, cifs_sb->prepath,
- cifs_sb->prepathlen);
-
- full_path[unc_len + cifs_sb->prepathlen] = 0; /* add trailing null */
+ full_path[unc_len] = 0; /* add trailing null */
+ convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
return full_path;
}
@@ -2796,7 +2868,7 @@ build_unc_path_to_root(const struct smb_vol *volume_info,
* determine whether there were referrals.
*/
static int
-expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo,
+expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
int check_prefix)
{
@@ -2840,40 +2912,13 @@ expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo,
}
#endif
-int
-cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
- const char *devname)
+int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
+ const char *devname)
{
- int rc;
- int xid;
struct smb_vol *volume_info;
- struct cifsSesInfo *pSesInfo;
- struct cifsTconInfo *tcon;
- struct TCP_Server_Info *srvTcp;
- char *full_path;
- struct tcon_link *tlink;
-#ifdef CONFIG_CIFS_DFS_UPCALL
- int referral_walks_count = 0;
-try_mount_again:
- /* cleanup activities if we're chasing a referral */
- if (referral_walks_count) {
- if (tcon)
- cifs_put_tcon(tcon);
- else if (pSesInfo)
- cifs_put_smb_ses(pSesInfo);
-
- cleanup_volume_info(&volume_info);
- FreeXid(xid);
- }
-#endif
- rc = 0;
- tcon = NULL;
- pSesInfo = NULL;
- srvTcp = NULL;
- full_path = NULL;
- tlink = NULL;
+ int rc = 0;
- xid = GetXid();
+ *pvolume_info = NULL;
volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
if (!volume_info) {
@@ -2881,7 +2926,7 @@ try_mount_again:
goto out;
}
- if (cifs_parse_mount_options(cifs_sb->mountdata, devname,
+ if (cifs_parse_mount_options(mount_data, devname,
volume_info)) {
rc = -EINVAL;
goto out;
@@ -2914,7 +2959,46 @@ try_mount_again:
goto out;
}
}
- cifs_sb->local_nls = volume_info->local_nls;
+
+ *pvolume_info = volume_info;
+ return rc;
+out:
+ cifs_cleanup_volume_info(&volume_info);
+ return rc;
+}
+
+int
+cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
+ struct smb_vol *volume_info, const char *devname)
+{
+ int rc = 0;
+ int xid;
+ struct cifs_ses *pSesInfo;
+ struct cifs_tcon *tcon;
+ struct TCP_Server_Info *srvTcp;
+ char *full_path;
+ struct tcon_link *tlink;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ int referral_walks_count = 0;
+try_mount_again:
+ /* cleanup activities if we're chasing a referral */
+ if (referral_walks_count) {
+ if (tcon)
+ cifs_put_tcon(tcon);
+ else if (pSesInfo)
+ cifs_put_smb_ses(pSesInfo);
+
+ cifs_cleanup_volume_info(&volume_info);
+ FreeXid(xid);
+ }
+#endif
+ tcon = NULL;
+ pSesInfo = NULL;
+ srvTcp = NULL;
+ full_path = NULL;
+ tlink = NULL;
+
+ xid = GetXid();
/* get a reference to a tcp session */
srvTcp = cifs_get_tcp_session(volume_info);
@@ -2931,7 +3015,6 @@ try_mount_again:
goto mount_fail_check;
}
- setup_cifs_sb(volume_info, cifs_sb);
if (pSesInfo->capabilities & CAP_LARGE_FILES)
sb->s_maxbytes = MAX_LFS_FILESIZE;
else
@@ -2948,35 +3031,36 @@ try_mount_again:
goto remote_path_check;
}
- /* do not care if following two calls succeed - informational */
- if (!tcon->ipc) {
- CIFSSMBQFSDeviceInfo(xid, tcon);
- CIFSSMBQFSAttributeInfo(xid, tcon);
- }
-
/* tell server which Unix caps we support */
- if (tcon->ses->capabilities & CAP_UNIX)
+ if (tcon->ses->capabilities & CAP_UNIX) {
/* reset of caps checks mount to see if unix extensions
disabled for just this mount */
reset_cifs_unix_caps(xid, tcon, sb, volume_info);
- else
+ if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
+ (le64_to_cpu(tcon->fsUnixInfo.Capability) &
+ CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
+ rc = -EACCES;
+ goto mount_fail_check;
+ }
+ } else
tcon->unix_ext = 0; /* server does not support them */
- /* convert forward to back slashes in prepath here if needed */
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
- convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
+ /* do not care if following two calls succeed - informational */
+ if (!tcon->ipc) {
+ CIFSSMBQFSDeviceInfo(xid, tcon);
+ CIFSSMBQFSAttributeInfo(xid, tcon);
+ }
if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
cifs_sb->rsize = 1024 * 127;
cFYI(DBG2, "no very large read support, rsize now 127K");
}
- if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
- cifs_sb->wsize = min(cifs_sb->wsize,
- (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
cifs_sb->rsize = min(cifs_sb->rsize,
(tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
+ cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
+
remote_path_check:
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
@@ -2996,10 +3080,10 @@ remote_path_check:
}
#endif
- /* check if a whole path (including prepath) is not remote */
+ /* check if a whole path is not remote */
if (!rc && tcon) {
/* build_path_to_root works only when we have a valid tcon */
- full_path = cifs_build_path_to_root(cifs_sb, tcon);
+ full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon);
if (full_path == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
@@ -3025,10 +3109,6 @@ remote_path_check:
rc = -ELOOP;
goto mount_fail_check;
}
- /* convert forward to back slashes in prepath here if needed */
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
- convert_delimiter(cifs_sb->prepath,
- CIFS_DIR_SEP(cifs_sb));
rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb,
true);
@@ -3087,14 +3167,13 @@ mount_fail_check:
password will be freed at unmount time) */
out:
/* zero out password before freeing */
- cleanup_volume_info(&volume_info);
FreeXid(xid);
return rc;
}
int
-CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
- const char *tree, struct cifsTconInfo *tcon,
+CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+ const char *tree, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
@@ -3126,7 +3205,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
- if ((ses->server->secMode) & SECMODE_USER) {
+ if ((ses->server->sec_mode) & SECMODE_USER) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
@@ -3143,7 +3222,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(tcon->password, ses->server->cryptkey,
- ses->server->secMode &
+ ses->server->sec_mode &
SECMODE_PW_ENCRYPT ? true : false,
bcc_ptr);
else
@@ -3159,7 +3238,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
}
}
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -3255,7 +3334,6 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
struct tcon_link *tlink;
- char *tmp;
cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
@@ -3272,15 +3350,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
}
spin_unlock(&cifs_sb->tlink_tree_lock);
- tmp = cifs_sb->prepath;
- cifs_sb->prepathlen = 0;
- cifs_sb->prepath = NULL;
- kfree(tmp);
-
return 0;
}
-int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
+int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
{
int rc = 0;
struct TCP_Server_Info *server = ses->server;
@@ -3310,7 +3383,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
}
-int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info)
{
int rc = 0;
@@ -3322,7 +3395,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
ses->capabilities &= (~CAP_UNIX);
cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
- server->secMode, server->capabilities, server->timeAdj);
+ server->sec_mode, server->capabilities, server->timeAdj);
rc = CIFS_SessSetup(xid, ses, nls_info);
if (rc) {
@@ -3354,12 +3427,12 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
return rc;
}
-static struct cifsTconInfo *
+static struct cifs_tcon *
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
{
- struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb);
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon = NULL;
+ struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon = NULL;
struct smb_vol *vol_info;
char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */
/* We used to have this as MAX_USERNAME which is */
@@ -3392,7 +3465,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
if (IS_ERR(ses)) {
- tcon = (struct cifsTconInfo *)ses;
+ tcon = (struct cifs_tcon *)ses;
cifs_put_tcp_session(master_tcon->ses->server);
goto out;
}
@@ -3417,7 +3490,7 @@ cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
return cifs_sb->master_tlink;
}
-struct cifsTconInfo *
+struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 9ea65cf36714..81914df47ef1 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -50,12 +50,11 @@ build_path_from_dentry(struct dentry *direntry)
{
struct dentry *temp;
int namelen;
- int pplen;
int dfsplen;
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
if (direntry == NULL)
return NULL; /* not much we can do if dentry is freed and
@@ -63,13 +62,12 @@ build_path_from_dentry(struct dentry *direntry)
when the server crashed */
dirsep = CIFS_DIR_SEP(cifs_sb);
- pplen = cifs_sb->prepathlen;
if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
cifs_bp_rename_retry:
- namelen = pplen + dfsplen;
+ namelen = dfsplen;
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
@@ -100,7 +98,7 @@ cifs_bp_rename_retry:
return NULL;
}
}
- if (namelen != pplen + dfsplen) {
+ if (namelen != dfsplen) {
cERROR(1, "did not end path lookup where expected namelen is %d",
namelen);
/* presumably this is only possible if racing with a rename
@@ -126,7 +124,6 @@ cifs_bp_rename_retry:
}
}
}
- strncpy(full_path + dfsplen, CIFS_SB(direntry->d_sb)->prepath, pplen);
return full_path;
}
@@ -152,7 +149,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
__u16 fileHandle;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
char *full_path = NULL;
FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
@@ -356,7 +353,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
+ struct cifs_io_parms io_parms;
char *full_path = NULL;
struct inode *newinode = NULL;
int oplock = 0;
@@ -439,16 +437,19 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
* timestamps in, but we can reuse it safely */
pdev = (struct win_dev *)buf;
+ io_parms.netfid = fileHandle;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = 0;
+ io_parms.length = sizeof(struct win_dev);
if (S_ISCHR(mode)) {
memcpy(pdev->type, "IntxCHR", 8);
pdev->major =
cpu_to_le64(MAJOR(device_number));
pdev->minor =
cpu_to_le64(MINOR(device_number));
- rc = CIFSSMBWrite(xid, pTcon,
- fileHandle,
- sizeof(struct win_dev),
- 0, &bytes_written, (char *)pdev,
+ rc = CIFSSMBWrite(xid, &io_parms,
+ &bytes_written, (char *)pdev,
NULL, 0);
} else if (S_ISBLK(mode)) {
memcpy(pdev->type, "IntxBLK", 8);
@@ -456,10 +457,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
cpu_to_le64(MAJOR(device_number));
pdev->minor =
cpu_to_le64(MINOR(device_number));
- rc = CIFSSMBWrite(xid, pTcon,
- fileHandle,
- sizeof(struct win_dev),
- 0, &bytes_written, (char *)pdev,
+ rc = CIFSSMBWrite(xid, &io_parms,
+ &bytes_written, (char *)pdev,
NULL, 0);
} /* else if (S_ISFIFO) */
CIFSSMBClose(xid, pTcon, fileHandle);
@@ -486,7 +485,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c672afef0c09..bb71471a4d9d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -114,7 +114,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_fattr fattr;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
cFYI(1, "posix open %s", full_path);
@@ -168,7 +168,7 @@ posix_open_ret:
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
- struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
+ struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
__u16 *pnetfid, int xid)
{
int rc;
@@ -285,7 +285,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
struct inode *inode = cifs_file->dentry->d_inode;
- struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsLockInfo *li, *tmp;
@@ -343,7 +343,7 @@ int cifs_open(struct inode *inode, struct file *file)
int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink;
struct cifsFileInfo *pCifsFile = NULL;
char *full_path = NULL;
@@ -457,7 +457,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct cifsInodeInfo *pCifsInode;
struct inode *inode;
char *full_path = NULL;
@@ -596,7 +596,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
xid = GetXid();
if (pCFileStruct) {
- struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
+ struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
cFYI(1, "Freeing private data in close dir");
spin_lock(&cifs_file_list_lock);
@@ -653,7 +653,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
__u64 length;
bool wait_flag = false;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
__u16 netfid;
__u8 lockType = LOCKING_ANDX_LARGE_FILES;
bool posix_locking = 0;
@@ -725,8 +725,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
else
posix_lock_type = CIFS_WRLCK;
rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
- length, pfLock,
- posix_lock_type, wait_flag);
+ length, pfLock, posix_lock_type,
+ wait_flag);
FreeXid(xid);
return rc;
}
@@ -797,8 +797,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
posix_lock_type = CIFS_UNLCK;
rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
- length, pfLock,
- posix_lock_type, wait_flag);
+ length, pfLock, posix_lock_type,
+ wait_flag);
} else {
struct cifsFileInfo *fid = file->private_data;
@@ -857,7 +857,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
cifsi->server_eof = end_of_write;
}
-static ssize_t cifs_write(struct cifsFileInfo *open_file,
+static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
const char *write_data, size_t write_size,
loff_t *poffset)
{
@@ -865,10 +865,11 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
unsigned int bytes_written = 0;
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
+ struct cifs_io_parms io_parms;
cifs_sb = CIFS_SB(dentry->d_sb);
@@ -901,8 +902,13 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
/* iov[0] is reserved for smb header */
iov[1].iov_base = (char *)write_data + total_written;
iov[1].iov_len = len;
- rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
- *poffset, &bytes_written, iov, 1, 0);
+ io_parms.netfid = open_file->netfid;
+ io_parms.pid = pid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = *poffset;
+ io_parms.length = len;
+ rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
+ 1, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
@@ -1071,8 +1077,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
open_file = find_writable_file(CIFS_I(mapping->host), false);
if (open_file) {
- bytes_written = cifs_write(open_file, write_data,
- to - from, &offset);
+ bytes_written = cifs_write(open_file, open_file->pid,
+ write_data, to - from, &offset);
cifsFileInfo_put(open_file);
/* Does mm or vfs already set times? */
inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
@@ -1092,58 +1098,20 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- unsigned int bytes_to_write;
- unsigned int bytes_written;
- struct cifs_sb_info *cifs_sb;
- int done = 0;
- pgoff_t end;
- pgoff_t index;
- int range_whole = 0;
- struct kvec *iov;
- int len;
- int n_iov = 0;
- pgoff_t next;
- int nr_pages;
- __u64 offset = 0;
- struct cifsFileInfo *open_file;
- struct cifsTconInfo *tcon;
- struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
+ bool done = false, scanned = false, range_whole = false;
+ pgoff_t end, index;
+ struct cifs_writedata *wdata;
struct page *page;
- struct pagevec pvec;
int rc = 0;
- int scanned = 0;
- int xid;
-
- cifs_sb = CIFS_SB(mapping->host->i_sb);
/*
- * If wsize is smaller that the page cache size, default to writing
+ * If wsize is smaller than the page cache size, default to writing
* one page at a time via cifs_writepage
*/
if (cifs_sb->wsize < PAGE_CACHE_SIZE)
return generic_writepages(mapping, wbc);
- iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
- if (iov == NULL)
- return generic_writepages(mapping, wbc);
-
- /*
- * if there's no open file, then this is likely to fail too,
- * but it'll at least handle the return. Maybe it should be
- * a BUG() instead?
- */
- open_file = find_writable_file(CIFS_I(mapping->host), false);
- if (!open_file) {
- kfree(iov);
- return generic_writepages(mapping, wbc);
- }
-
- tcon = tlink_tcon(open_file->tlink);
- cifsFileInfo_put(open_file);
-
- xid = GetXid();
-
- pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -1151,24 +1119,49 @@ static int cifs_writepages(struct address_space *mapping,
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
- scanned = 1;
+ range_whole = true;
+ scanned = true;
}
retry:
- while (!done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
- int first;
- unsigned int i;
-
- first = -1;
- next = 0;
- n_iov = 0;
- bytes_to_write = 0;
-
- for (i = 0; i < nr_pages; i++) {
- page = pvec.pages[i];
+ while (!done && index <= end) {
+ unsigned int i, nr_pages, found_pages;
+ pgoff_t next = 0, tofind;
+ struct page **pages;
+
+ tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
+ end - index) + 1;
+
+ wdata = cifs_writedata_alloc((unsigned int)tofind);
+ if (!wdata) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ /*
+ * find_get_pages_tag seems to return a max of 256 on each
+ * iteration, so we must call it several times in order to
+ * fill the array or the wsize is effectively limited to
+ * 256 * PAGE_CACHE_SIZE.
+ */
+ found_pages = 0;
+ pages = wdata->pages;
+ do {
+ nr_pages = find_get_pages_tag(mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ tofind, pages);
+ found_pages += nr_pages;
+ tofind -= nr_pages;
+ pages += nr_pages;
+ } while (nr_pages && tofind && index <= end);
+
+ if (found_pages == 0) {
+ kref_put(&wdata->refcount, cifs_writedata_release);
+ break;
+ }
+
+ nr_pages = 0;
+ for (i = 0; i < found_pages; i++) {
+ page = wdata->pages[i];
/*
* At this point we hold neither mapping->tree_lock nor
* lock on the page itself: the page may be truncated or
@@ -1177,7 +1170,7 @@ retry:
* mapping
*/
- if (first < 0)
+ if (nr_pages == 0)
lock_page(page);
else if (!trylock_page(page))
break;
@@ -1188,7 +1181,7 @@ retry:
}
if (!wbc->range_cyclic && page->index > end) {
- done = 1;
+ done = true;
unlock_page(page);
break;
}
@@ -1215,119 +1208,89 @@ retry:
set_page_writeback(page);
if (page_offset(page) >= mapping->host->i_size) {
- done = 1;
+ done = true;
unlock_page(page);
end_page_writeback(page);
break;
}
- /*
- * BB can we get rid of this? pages are held by pvec
- */
- page_cache_get(page);
+ wdata->pages[i] = page;
+ next = page->index + 1;
+ ++nr_pages;
+ }
- len = min(mapping->host->i_size - page_offset(page),
- (loff_t)PAGE_CACHE_SIZE);
+ /* reset index to refind any pages skipped */
+ if (nr_pages == 0)
+ index = wdata->pages[0]->index + 1;
- /* reserve iov[0] for the smb header */
- n_iov++;
- iov[n_iov].iov_base = kmap(page);
- iov[n_iov].iov_len = len;
- bytes_to_write += len;
+ /* put any pages we aren't going to use */
+ for (i = nr_pages; i < found_pages; i++) {
+ page_cache_release(wdata->pages[i]);
+ wdata->pages[i] = NULL;
+ }
- if (first < 0) {
- first = i;
- offset = page_offset(page);
- }
- next = page->index + 1;
- if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
- break;
+ /* nothing to write? */
+ if (nr_pages == 0) {
+ kref_put(&wdata->refcount, cifs_writedata_release);
+ continue;
}
- if (n_iov) {
-retry_write:
- open_file = find_writable_file(CIFS_I(mapping->host),
- false);
- if (!open_file) {
- cERROR(1, "No writable handles for inode");
- rc = -EBADF;
- } else {
- rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
- bytes_to_write, offset,
- &bytes_written, iov, n_iov,
- 0);
- cifsFileInfo_put(open_file);
- }
- cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
+ wdata->sync_mode = wbc->sync_mode;
+ wdata->nr_pages = nr_pages;
+ wdata->offset = page_offset(wdata->pages[0]);
- /*
- * For now, treat a short write as if nothing got
- * written. A zero length write however indicates
- * ENOSPC or EFBIG. We have no way to know which
- * though, so call it ENOSPC for now. EFBIG would
- * get translated to AS_EIO anyway.
- *
- * FIXME: make it take into account the data that did
- * get written
- */
- if (rc == 0) {
- if (bytes_written == 0)
- rc = -ENOSPC;
- else if (bytes_written < bytes_to_write)
- rc = -EAGAIN;
+ do {
+ if (wdata->cfile != NULL)
+ cifsFileInfo_put(wdata->cfile);
+ wdata->cfile = find_writable_file(CIFS_I(mapping->host),
+ false);
+ if (!wdata->cfile) {
+ cERROR(1, "No writable handles for inode");
+ rc = -EBADF;
+ break;
}
+ rc = cifs_async_writev(wdata);
+ } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
- /* retry on data-integrity flush */
- if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
- goto retry_write;
-
- /* fix the stats and EOF */
- if (bytes_written > 0) {
- cifs_stats_bytes_written(tcon, bytes_written);
- cifs_update_eof(cifsi, offset, bytes_written);
- }
+ for (i = 0; i < nr_pages; ++i)
+ unlock_page(wdata->pages[i]);
- for (i = 0; i < n_iov; i++) {
- page = pvec.pages[first + i];
- /* on retryable write error, redirty page */
+ /* send failure -- clean up the mess */
+ if (rc != 0) {
+ for (i = 0; i < nr_pages; ++i) {
if (rc == -EAGAIN)
- redirty_page_for_writepage(wbc, page);
- else if (rc != 0)
- SetPageError(page);
- kunmap(page);
- unlock_page(page);
- end_page_writeback(page);
- page_cache_release(page);
+ redirty_page_for_writepage(wbc,
+ wdata->pages[i]);
+ else
+ SetPageError(wdata->pages[i]);
+ end_page_writeback(wdata->pages[i]);
+ page_cache_release(wdata->pages[i]);
}
-
if (rc != -EAGAIN)
mapping_set_error(mapping, rc);
- else
- rc = 0;
+ }
+ kref_put(&wdata->refcount, cifs_writedata_release);
- if ((wbc->nr_to_write -= n_iov) <= 0)
- done = 1;
- index = next;
- } else
- /* Need to re-find the pages we skipped */
- index = pvec.pages[0]->index + 1;
+ wbc->nr_to_write -= nr_pages;
+ if (wbc->nr_to_write <= 0)
+ done = true;
- pagevec_release(&pvec);
+ index = next;
}
+
if (!scanned && !done) {
/*
* We hit the last page and there is more work to be done: wrap
* back to the start of the file
*/
- scanned = 1;
+ scanned = true;
index = 0;
goto retry;
}
+
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
- FreeXid(xid);
- kfree(iov);
return rc;
}
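
The cifs_writepages() rewrite above drops the kvec/pagevec machinery and the synchronous CIFSSMBWrite2() call in favor of a cifs_writedata descriptor handed to cifs_async_writev(), retrying only on -EAGAIN for data-integrity flushes. The hunk's own comment notes that find_get_pages_tag() returns at most roughly 256 pages per call, so the gathering loop keeps calling until the array is full or the range is exhausted. Below is a small stand-alone sketch of that gathering loop; the stand-in pager and names are illustrative, only the loop structure follows the diff.

/*
 * Sketch of the page-gathering loop in the new cifs_writepages().
 * fake_find_pages() stands in for find_get_pages_tag() and caps each
 * call at a small batch to show why the loop calls it repeatedly.
 */
#include <stdio.h>

#define BATCH_MAX 4     /* stand-in for the ~256-page cap per lookup call */

static unsigned int fake_find_pages(unsigned long *index, unsigned int tofind,
                                    unsigned long *pages, unsigned long last)
{
        unsigned int got = 0;

        while (got < tofind && got < BATCH_MAX && *index <= last)
                pages[got++] = (*index)++;
        return got;
}

int main(void)
{
        unsigned long pages[16], index = 0, last = 9;
        unsigned int found = 0, nr, tofind = 16;
        unsigned long *p = pages;

        /* keep calling until the array is full or the range is exhausted */
        do {
                nr = fake_find_pages(&index, tofind, p, last);
                found += nr;
                tofind -= nr;
                p += nr;
        } while (nr && tofind && index <= last);

        printf("gathered %u pages in one writedata batch\n", found);
        return 0;
}
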
@@ -1383,6 +1346,14 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
{
int rc;
struct inode *inode = mapping->host;
+ struct cifsFileInfo *cfile = file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+ __u32 pid;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ pid = cfile->pid;
+ else
+ pid = current->tgid;
cFYI(1, "write_end for page %p from pos %lld with %d bytes",
page, pos, copied);
@@ -1406,8 +1377,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
/* BB check if anything else missing out of ppw
such as updating last write time */
page_data = kmap(page);
- rc = cifs_write(file->private_data, page_data + offset,
- copied, &pos);
+ rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
/* if (rc < 0) should we set writebehind rc? */
kunmap(page);
@@ -1435,7 +1405,7 @@ int cifs_strict_fsync(struct file *file, int datasync)
{
int xid;
int rc = 0;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -1465,7 +1435,7 @@ int cifs_fsync(struct file *file, int datasync)
{
int xid;
int rc = 0;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct cifsFileInfo *smbfile = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -1556,9 +1526,11 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
struct iov_iter it;
struct inode *inode;
struct cifsFileInfo *open_file;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifs_sb_info *cifs_sb;
+ struct cifs_io_parms io_parms;
int xid, rc;
+ __u32 pid;
len = iov_length(iov, nr_segs);
if (!len)
@@ -1590,6 +1562,12 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
xid = GetXid();
open_file = file->private_data;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ pid = open_file->pid;
+ else
+ pid = current->tgid;
+
pTcon = tlink_tcon(open_file->tlink);
inode = file->f_path.dentry->d_inode;
@@ -1616,9 +1594,13 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
if (rc != 0)
break;
}
- rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
- cur_len, *poffset, &written,
- to_send, npages, 0);
+ io_parms.netfid = open_file->netfid;
+ io_parms.pid = pid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = *poffset;
+ io_parms.length = cur_len;
+ rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
+ npages, 0);
} while (rc == -EAGAIN);
for (i = 0; i < npages; i++)
@@ -1711,10 +1693,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
size_t len, cur_len;
int iov_offset = 0;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsFileInfo *open_file;
struct smb_com_read_rsp *pSMBr;
+ struct cifs_io_parms io_parms;
char *read_data;
+ __u32 pid;
if (!nr_segs)
return 0;
@@ -1729,6 +1713,11 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
open_file = file->private_data;
pTcon = tlink_tcon(open_file->tlink);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ pid = open_file->pid;
+ else
+ pid = current->tgid;
+
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
@@ -1744,8 +1733,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
if (rc != 0)
break;
}
- rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
- cur_len, *poffset, &bytes_read,
+ io_parms.netfid = open_file->netfid;
+ io_parms.pid = pid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = *poffset;
+ io_parms.length = len;
+ rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
&read_data, &buf_type);
pSMBr = (struct smb_com_read_rsp *)read_data;
if (read_data) {
@@ -1822,11 +1815,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
unsigned int total_read;
unsigned int current_read_size;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
int xid;
char *current_offset;
struct cifsFileInfo *open_file;
+ struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
+ __u32 pid;
xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -1839,6 +1834,11 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
open_file = file->private_data;
pTcon = tlink_tcon(open_file->tlink);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ pid = open_file->pid;
+ else
+ pid = current->tgid;
+
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
@@ -1861,11 +1861,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
if (rc != 0)
break;
}
- rc = CIFSSMBRead(xid, pTcon,
- open_file->netfid,
- current_read_size, *poffset,
- &bytes_read, &current_offset,
- &buf_type);
+ io_parms.netfid = open_file->netfid;
+ io_parms.pid = pid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = *poffset;
+ io_parms.length = current_read_size;
+ rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
+ &current_offset, &buf_type);
}
if (rc || (bytes_read == 0)) {
if (total_read) {
@@ -1996,13 +1998,15 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
loff_t offset;
struct page *page;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
unsigned int bytes_read = 0;
unsigned int read_size, i;
char *smb_read_data = NULL;
struct smb_com_read_rsp *pSMBr;
struct cifsFileInfo *open_file;
+ struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
+ __u32 pid;
xid = GetXid();
if (file->private_data == NULL) {
@@ -2024,6 +2028,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
goto read_complete;
cFYI(DBG2, "rpages: num pages %d", num_pages);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ pid = open_file->pid;
+ else
+ pid = current->tgid;
+
for (i = 0; i < num_pages; ) {
unsigned contig_pages;
struct page *tmp_page;
@@ -2065,12 +2074,13 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
if (rc != 0)
break;
}
-
- rc = CIFSSMBRead(xid, pTcon,
- open_file->netfid,
- read_size, offset,
- &bytes_read, &smb_read_data,
- &buf_type);
+ io_parms.netfid = open_file->netfid;
+ io_parms.pid = pid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = offset;
+ io_parms.length = read_size;
+ rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
+ &smb_read_data, &buf_type);
/* BB more RC checks ? */
if (rc == -EAGAIN) {
if (smb_read_data) {
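
Several of the file.c paths above (cifs_write_end, cifs_iovec_write, cifs_iovec_read, cifs_read, cifs_readpages) gain the same pid-selection idiom before filling io_parms.pid. A sketch of that idiom follows; the flag value and the surrounding types are illustrative stand-ins, only the decision mirrors the diff.

#include <stdio.h>

#define CIFS_MOUNT_RWPIDFORWARD 0x80000   /* illustrative value, not the real flag */

struct open_file_stub {
        unsigned int pid;                 /* pid recorded when the file was opened */
};

static unsigned int pick_io_pid(unsigned int mount_flags,
                                const struct open_file_stub *open_file,
                                unsigned int current_tgid)
{
        /* forward the opener's pid when the mount asks for it,
           otherwise stamp the I/O with the caller's own tgid */
        if (mount_flags & CIFS_MOUNT_RWPIDFORWARD)
                return open_file->pid;
        return current_tgid;
}

int main(void)
{
        struct open_file_stub f = { .pid = 1234 };

        printf("forwarded pid: %u\n",
               pick_io_pid(CIFS_MOUNT_RWPIDFORWARD, &f, 5678));
        printf("caller pid:    %u\n", pick_io_pid(0, &f, 5678));
        return 0;
}
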
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 297a43d0ff7f..d368a47ba5eb 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -40,7 +40,7 @@ void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
server->fscache = NULL;
}
-void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
+void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
{
struct TCP_Server_Info *server = tcon->ses->server;
@@ -51,7 +51,7 @@ void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
server->fscache, tcon->fscache);
}
-void cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon)
+void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
{
cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache);
fscache_relinquish_cookie(tcon->fscache, 0);
@@ -62,7 +62,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
{
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
if (cifsi->fscache)
return;
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 31b88ec2341e..63539323e0b9 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -40,8 +40,8 @@ extern void cifs_fscache_unregister(void);
*/
extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *);
extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *);
-extern void cifs_fscache_get_super_cookie(struct cifsTconInfo *);
-extern void cifs_fscache_release_super_cookie(struct cifsTconInfo *);
+extern void cifs_fscache_get_super_cookie(struct cifs_tcon *);
+extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
extern void cifs_fscache_release_inode_cookie(struct inode *);
extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
@@ -99,9 +99,9 @@ static inline void
cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {}
static inline void
cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {}
-static inline void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) {}
+static inline void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) {}
static inline void
-cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) {}
+cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index de02ed5e25c2..9b018c8334fa 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -295,7 +295,7 @@ int cifs_get_file_info_unix(struct file *filp)
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
- struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
xid = GetXid();
rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -318,7 +318,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
int rc;
FILE_UNIX_BASIC_INFO find_data;
struct cifs_fattr fattr;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -373,7 +373,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
int oplock = 0;
__u16 netfid;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
+ struct cifs_io_parms io_parms;
char buf[24];
unsigned int bytes_read;
char *pbuf;
@@ -405,9 +406,13 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
if (rc == 0) {
int buf_type = CIFS_NO_BUFFER;
/* Read header */
- rc = CIFSSMBRead(xid, tcon, netfid,
- 24 /* length */, 0 /* offset */,
- &bytes_read, &pbuf, &buf_type);
+ io_parms.netfid = netfid;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+ io_parms.offset = 0;
+ io_parms.length = 24;
+ rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf,
+ &buf_type);
if ((rc == 0) && (bytes_read >= 8)) {
if (memcmp("IntxBLK", pbuf, 8) == 0) {
cFYI(1, "Block device");
@@ -468,7 +473,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
char ea_value[4];
__u32 mode;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -502,7 +507,7 @@ static void
cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
struct cifs_sb_info *cifs_sb, bool adjust_tz)
{
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
memset(fattr, 0, sizeof(*fattr));
fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
@@ -553,7 +558,7 @@ int cifs_get_file_info(struct file *filp)
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
- struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
xid = GetXid();
rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -590,7 +595,7 @@ int cifs_get_inode_info(struct inode **pinode,
struct super_block *sb, int xid, const __u16 *pfid)
{
int rc = 0, tmprc;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
char *buf = NULL;
@@ -735,10 +740,10 @@ static const struct inode_operations cifs_ipc_inode_ops = {
.lookup = cifs_lookup,
};
-char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
- struct cifsTconInfo *tcon)
+char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon)
{
- int pplen = cifs_sb->prepathlen;
+ int pplen = vol->prepath ? strlen(vol->prepath) : 0;
int dfsplen;
char *full_path = NULL;
@@ -772,7 +777,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
}
}
}
- strncpy(full_path + dfsplen, cifs_sb->prepath, pplen);
+ strncpy(full_path + dfsplen, vol->prepath, pplen);
full_path[dfsplen + pplen] = 0; /* add trailing null */
return full_path;
}
@@ -884,19 +889,13 @@ struct inode *cifs_root_iget(struct super_block *sb)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct inode *inode = NULL;
long rc;
- char *full_path;
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
-
- full_path = cifs_build_path_to_root(cifs_sb, tcon);
- if (full_path == NULL)
- return ERR_PTR(-ENOMEM);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
xid = GetXid();
if (tcon->unix_ext)
- rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+ rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
else
- rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
- xid, NULL);
+ rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
if (!inode) {
inode = ERR_PTR(rc);
@@ -922,7 +921,6 @@ struct inode *cifs_root_iget(struct super_block *sb)
}
out:
- kfree(full_path);
/* can not call macro FreeXid here since in a void func
* TODO: This is no longer true
*/
@@ -943,7 +941,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
FILE_BASIC_INFO info_buf;
if (attrs == NULL)
@@ -1061,7 +1059,7 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
__u32 dosattr, origattr;
FILE_BASIC_INFO *info_buf = NULL;
@@ -1179,7 +1177,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
struct super_block *sb = dir->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct iattr *attrs = NULL;
__u32 dosattr = 0, origattr = 0;
@@ -1277,7 +1275,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
struct cifs_fattr fattr;
@@ -1455,7 +1453,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
@@ -1512,7 +1510,7 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
__u16 srcfid;
int oplock, rc;
@@ -1564,7 +1562,7 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
char *toName = NULL;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
int xid, rc, tmprc;
@@ -1794,7 +1792,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct inode *inode = dentry->d_inode;
int rc;
@@ -1872,7 +1870,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *pTcon = NULL;
+ struct cifs_tcon *pTcon = NULL;
+ struct cifs_io_parms io_parms;
/*
* To avoid spurious oplock breaks from server, in the case of
@@ -1894,8 +1893,14 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
cFYI(1, "SetFSize for attrs rc = %d", rc);
if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
unsigned int bytes_written;
- rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size,
- &bytes_written, NULL, NULL, 1);
+
+ io_parms.netfid = nfid;
+ io_parms.pid = npid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = 0;
+ io_parms.length = attrs->ia_size;
+ rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
+ NULL, NULL, 1);
cFYI(1, "Wrt seteof rc %d", rc);
}
} else
@@ -1930,10 +1935,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc == 0) {
unsigned int bytes_written;
- rc = CIFSSMBWrite(xid, pTcon, netfid, 0,
- attrs->ia_size,
- &bytes_written, NULL,
- NULL, 1);
+
+ io_parms.netfid = netfid;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = 0;
+ io_parms.length = attrs->ia_size;
+ rc = CIFSSMBWrite(xid, &io_parms,
+ &bytes_written,
+ NULL, NULL, 1);
cFYI(1, "wrt seteof rc %d", rc);
CIFSSMBClose(xid, pTcon, netfid);
}
@@ -1961,7 +1971,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifs_unix_set_info_args *args = NULL;
struct cifsFileInfo *open_file;
@@ -2247,7 +2257,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
struct inode *inode = direntry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *pTcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
if (pTcon->unix_ext)
return cifs_setattr_unix(direntry, attrs);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 0c98672d0122..4221b5e48a42 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,7 +38,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
struct cifs_sb_info *cifs_sb;
#ifdef CONFIG_CIFS_POSIX
struct cifsFileInfo *pSMBFile = filep->private_data;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
__u64 ExtAttrBits = 0;
__u64 ExtAttrMask = 0;
__u64 caps;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index ce417a9764a3..556b1a0b54de 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -175,7 +175,7 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
}
static int
-CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -184,6 +184,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
__u16 netfid = 0;
u8 *buf;
unsigned int bytes_written = 0;
+ struct cifs_io_parms io_parms;
buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
if (!buf)
@@ -203,10 +204,13 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
return rc;
}
- rc = CIFSSMBWrite(xid, tcon, netfid,
- CIFS_MF_SYMLINK_FILE_SIZE /* length */,
- 0 /* offset */,
- &bytes_written, buf, NULL, 0);
+ io_parms.netfid = netfid;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+ io_parms.offset = 0;
+ io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+
+ rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, buf, NULL, 0);
CIFSSMBClose(xid, tcon, netfid);
kfree(buf);
if (rc != 0)
@@ -219,7 +223,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
}
static int
-CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSQueryMFSymLink(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, char **symlinkinfo,
const struct nls_table *nls_codepage, int remap)
{
@@ -231,6 +235,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
unsigned int bytes_read = 0;
int buf_type = CIFS_NO_BUFFER;
unsigned int link_len = 0;
+ struct cifs_io_parms io_parms;
FILE_ALL_INFO file_info;
rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ,
@@ -249,11 +254,13 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
if (!buf)
return -ENOMEM;
pbuf = buf;
+ io_parms.netfid = netfid;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+ io_parms.offset = 0;
+ io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
- rc = CIFSSMBRead(xid, tcon, netfid,
- CIFS_MF_SYMLINK_FILE_SIZE /* length */,
- 0 /* offset */,
- &bytes_read, &pbuf, &buf_type);
+ rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
CIFSSMBClose(xid, tcon, netfid);
if (rc != 0) {
kfree(buf);
@@ -291,7 +298,8 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
int oplock = 0;
__u16 netfid = 0;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
+ struct cifs_io_parms io_parms;
u8 *buf;
char *pbuf;
unsigned int bytes_read = 0;
@@ -328,11 +336,13 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
goto out;
}
pbuf = buf;
+ io_parms.netfid = netfid;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = pTcon;
+ io_parms.offset = 0;
+ io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
- rc = CIFSSMBRead(xid, pTcon, netfid,
- CIFS_MF_SYMLINK_FILE_SIZE /* length */,
- 0 /* offset */,
- &bytes_read, &pbuf, &buf_type);
+ rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
CIFSSMBClose(xid, pTcon, netfid);
if (rc != 0) {
kfree(buf);
@@ -370,7 +380,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
char *toName = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsInodeInfo *cifsInode;
tlink = cifs_sb_tlink(cifs_sb);
@@ -445,7 +455,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
char *target_path = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
xid = GetXid();
@@ -518,7 +528,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
int xid;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 907531ac5888..03a1f491d39b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -67,12 +67,12 @@ _FreeXid(unsigned int xid)
spin_unlock(&GlobalMid_Lock);
}
-struct cifsSesInfo *
+struct cifs_ses *
sesInfoAlloc(void)
{
- struct cifsSesInfo *ret_buf;
+ struct cifs_ses *ret_buf;
- ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL);
+ ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
if (ret_buf) {
atomic_inc(&sesInfoAllocCount);
ret_buf->status = CifsNew;
@@ -85,7 +85,7 @@ sesInfoAlloc(void)
}
void
-sesInfoFree(struct cifsSesInfo *buf_to_free)
+sesInfoFree(struct cifs_ses *buf_to_free)
{
if (buf_to_free == NULL) {
cFYI(1, "Null buffer passed to sesInfoFree");
@@ -105,11 +105,11 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
kfree(buf_to_free);
}
-struct cifsTconInfo *
+struct cifs_tcon *
tconInfoAlloc(void)
{
- struct cifsTconInfo *ret_buf;
- ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL);
+ struct cifs_tcon *ret_buf;
+ ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
if (ret_buf) {
atomic_inc(&tconInfoAllocCount);
ret_buf->tidStatus = CifsNew;
@@ -124,7 +124,7 @@ tconInfoAlloc(void)
}
void
-tconInfoFree(struct cifsTconInfo *buf_to_free)
+tconInfoFree(struct cifs_tcon *buf_to_free)
{
if (buf_to_free == NULL) {
cFYI(1, "Null buffer passed to tconInfoFree");
@@ -295,11 +295,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
case it is responsbility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
- const struct cifsTconInfo *treeCon, int word_count
+ const struct cifs_tcon *treeCon, int word_count
/* length of fixed section (word count) in two byte units */)
{
struct list_head *temp_item;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
char *temp = (char *) buffer;
memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -359,7 +359,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
"did not match tcon uid");
spin_lock(&cifs_tcp_ses_lock);
list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
- ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
+ ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
if (ses->linux_uid == current_fsuid()) {
if (ses->server == treeCon->ses->server) {
cFYI(1, "found matching uid substitute right smb_uid");
@@ -380,7 +380,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
if (treeCon->nocase)
buffer->Flags |= SMBFLG_CASELESS;
if ((treeCon->ses) && (treeCon->ses->server))
- if (treeCon->ses->server->secMode &
+ if (treeCon->ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
@@ -507,8 +507,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
{
struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
struct list_head *tmp, *tmp1, *tmp2;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
struct cifsInodeInfo *pCifsInode;
struct cifsFileInfo *netfile;
@@ -566,9 +566,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &srv->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
list_for_each(tmp1, &ses->tcon_list) {
- tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list);
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
if (tcon->tid != buf->Tid)
continue;
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 79b71c2c7c9d..73e47e84b61a 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -836,7 +836,7 @@ ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
}
int
-map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
+map_smb_to_linux_error(struct smb_hdr *smb, bool logErr)
{
unsigned int i;
int rc = -EIO; /* if transport error smb error may not be set */
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f8e4cd2a7912..6751e745bbc6 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -195,7 +195,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
int len;
int oplock = 0;
int rc;
- struct cifsTconInfo *ptcon = cifs_sb_tcon(cifs_sb);
+ struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
char *tmpbuffer;
rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
@@ -223,7 +223,7 @@ static int initiate_cifs_search(const int xid, struct file *file)
struct cifsFileInfo *cifsFile;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
if (file->private_data == NULL) {
tlink = cifs_sb_tlink(cifs_sb);
@@ -496,7 +496,7 @@ static int cifs_save_resume_key(const char *current_entry,
assume that they are located in the findfirst return buffer.*/
/* We start counting in the buffer with entry 2 and increment for every
entry (do not increment for . or .. entry) */
-static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
+static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
int rc = 0;
@@ -764,7 +764,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
{
int rc = 0;
int xid, i;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsFileInfo *cifsFile = NULL;
char *current_entry;
int num_to_fill = 0;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7dd462100378..3892ab817a36 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -37,13 +37,13 @@
* the socket has been reestablished (so we know whether to use vc 0).
* Called while holding the cifs_tcp_ses_lock, so do not block
*/
-static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
+static bool is_first_ses_reconnect(struct cifs_ses *ses)
{
struct list_head *tmp;
- struct cifsSesInfo *tmp_ses;
+ struct cifs_ses *tmp_ses;
list_for_each(tmp, &ses->server->smb_ses_list) {
- tmp_ses = list_entry(tmp, struct cifsSesInfo,
+ tmp_ses = list_entry(tmp, struct cifs_ses,
smb_ses_list);
if (tmp_ses->need_reconnect == false)
return false;
@@ -61,11 +61,11 @@ static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
* any vc but zero (some servers reset the connection on vcnum zero)
*
*/
-static __le16 get_next_vcnum(struct cifsSesInfo *ses)
+static __le16 get_next_vcnum(struct cifs_ses *ses)
{
__u16 vcnum = 0;
struct list_head *tmp;
- struct cifsSesInfo *tmp_ses;
+ struct cifs_ses *tmp_ses;
__u16 max_vcs = ses->server->max_vcs;
__u16 i;
int free_vc_found = 0;
@@ -87,7 +87,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
free_vc_found = 1;
list_for_each(tmp, &ses->server->smb_ses_list) {
- tmp_ses = list_entry(tmp, struct cifsSesInfo,
+ tmp_ses = list_entry(tmp, struct cifs_ses,
smb_ses_list);
if (tmp_ses->vcnum == i) {
free_vc_found = 0;
@@ -114,7 +114,7 @@ get_vc_num_exit:
return cpu_to_le16(vcnum);
}
-static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
+static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
{
__u32 capabilities = 0;
@@ -136,7 +136,7 @@ static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -181,7 +181,7 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
*pbcc_area = bcc_ptr;
}
-static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses,
+static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
@@ -204,7 +204,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses,
}
-static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
@@ -236,7 +236,7 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
*pbcc_area = bcc_ptr;
}
-static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
@@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
}
static void
-decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int len;
@@ -310,7 +310,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
}
static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
- struct cifsSesInfo *ses,
+ struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
@@ -364,7 +364,7 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
}
static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
- struct cifsSesInfo *ses)
+ struct cifs_ses *ses)
{
unsigned int tioffset; /* challenge message target info area */
unsigned int tilen; /* challenge message target info area length */
@@ -411,7 +411,7 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
/* We do not malloc the blob, it is passed in pbuffer, because
it is fixed size, and small, making this approach cleaner */
static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
- struct cifsSesInfo *ses)
+ struct cifs_ses *ses)
{
NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
__u32 flags;
@@ -424,7 +424,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
flags |= NTLMSSP_NEGOTIATE_SIGN;
if (!ses->server->session_estab)
@@ -449,7 +449,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
This function returns the length of the data in the blob */
static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
u16 *buflen,
- struct cifsSesInfo *ses,
+ struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc;
@@ -464,10 +464,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
+ if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
@@ -551,7 +551,7 @@ setup_ntlmv2_ret:
}
int
-CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
@@ -657,7 +657,7 @@ ssetup_ntlmssp_authenticate:
*/
rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
- ses->server->secMode & SECMODE_PW_ENCRYPT ?
+ ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
true : false, lnm_session_key);
ses->flags |= CIFS_SES_LANMAN;
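
The sess.c hunks are mostly the secMode -> sec_mode rename, but the signing decision they guard is worth spelling out: request NTLMSSP signing whenever the server enables or requires it, and add ALWAYS_SIGN only when it is required. A stand-alone sketch with illustrative flag values:

/*
 * Sketch of the signing-flag selection read from server->sec_mode.
 * Flag values are stand-ins; only the decision logic follows the diff.
 */
#include <stdio.h>

#define SECMODE_SIGN_ENABLED           0x01
#define SECMODE_SIGN_REQUIRED          0x02
#define NTLMSSP_NEGOTIATE_SIGN         0x00000010
#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN  0x00008000

static unsigned int ntlmssp_sign_flags(unsigned int sec_mode)
{
        unsigned int flags = 0;

        /* ask for signing whenever the server enables or requires it */
        if (sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                flags |= NTLMSSP_NEGOTIATE_SIGN;
        /* and insist on it when the server requires it */
        if (sec_mode & SECMODE_SIGN_REQUIRED)
                flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
        return flags;
}

int main(void)
{
        printf("required: %#x\n", ntlmssp_sign_flags(SECMODE_SIGN_REQUIRED));
        printf("enabled:  %#x\n", ntlmssp_sign_flags(SECMODE_SIGN_ENABLED));
        return 0;
}
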
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f2513fb8c391..147aa22c3c3a 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -295,7 +295,7 @@ static int wait_for_free_request(struct TCP_Server_Info *server,
return 0;
}
-static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
+static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
if (ses->server->tcpStatus == CifsExiting) {
@@ -342,22 +342,24 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
* the result. Caller is responsible for dealing with timeouts.
*/
int
-cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
- mid_callback_t *callback, void *cbdata)
+cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
+ unsigned int nvec, mid_callback_t *callback, void *cbdata,
+ bool ignore_pend)
{
int rc;
struct mid_q_entry *mid;
+ struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
- rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+ rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
if (rc)
return rc;
/* enable signing if server requires it */
- if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
mutex_lock(&server->srv_mutex);
- mid = AllocMidQEntry(in_buf, server);
+ mid = AllocMidQEntry(hdr, server);
if (mid == NULL) {
mutex_unlock(&server->srv_mutex);
return -ENOMEM;
@@ -368,7 +370,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
- rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+ rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
if (rc) {
mutex_unlock(&server->srv_mutex);
goto out_err;
@@ -380,7 +382,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&server->inSend);
#endif
- rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+ rc = smb_sendv(server, iov, nvec);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&server->inSend);
mid->when_sent = jiffies;
@@ -407,7 +409,7 @@ out_err:
*
*/
int
-SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
+SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, int flags)
{
int rc;
@@ -424,7 +426,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
}
static int
-sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
int rc = 0;
@@ -432,28 +434,21 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
mid->mid, mid->midState);
spin_lock(&GlobalMid_Lock);
- /* ensure that it's no longer on the pending_mid_q */
- list_del_init(&mid->qhead);
-
switch (mid->midState) {
case MID_RESPONSE_RECEIVED:
spin_unlock(&GlobalMid_Lock);
return rc;
- case MID_REQUEST_SUBMITTED:
- /* socket is going down, reject all calls */
- if (server->tcpStatus == CifsExiting) {
- cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
- __func__, mid->mid, mid->command, mid->midState);
- rc = -EHOSTDOWN;
- break;
- }
case MID_RETRY_NEEDED:
rc = -EAGAIN;
break;
case MID_RESPONSE_MALFORMED:
rc = -EIO;
break;
+ case MID_SHUTDOWN:
+ rc = -EHOSTDOWN;
+ break;
default:
+ list_del_init(&mid->qhead);
cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
mid->mid, mid->midState);
rc = -EIO;
@@ -502,13 +497,31 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
}
int
-SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
+cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+ bool log_error)
+{
+ dump_smb(mid->resp_buf,
+ min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));
+
+ /* convert the length into a more usable form */
+ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
+ /* FIXME: add code to kill session */
+ if (cifs_verify_signature(mid->resp_buf, server,
+ mid->sequence_number + 1) != 0)
+ cERROR(1, "Unexpected SMB signature");
+ }
+
+ /* BB special case reconnect tid and uid here? */
+ return map_smb_to_linux_error(mid->resp_buf, log_error);
+}
+
+int
+SendReceive2(const unsigned int xid, struct cifs_ses *ses,
struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
const int flags)
{
int rc = 0;
int long_op;
- unsigned int receive_len;
struct mid_q_entry *midQ;
struct smb_hdr *in_buf = iov[0].iov_base;
@@ -598,61 +611,31 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
cifs_small_buf_release(in_buf);
- rc = sync_mid_result(midQ, ses->server);
+ rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
- receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
-
- if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, "Frame too large received. Length: %d Xid: %d",
- receive_len, xid);
+ if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
rc = -EIO;
+ cFYI(1, "Bad MID state?");
goto out;
}
- /* rcvd frame is ok */
-
- if (midQ->resp_buf &&
- (midQ->midState == MID_RESPONSE_RECEIVED)) {
-
- iov[0].iov_base = (char *)midQ->resp_buf;
- if (midQ->largeBuf)
- *pRespBufType = CIFS_LARGE_BUFFER;
- else
- *pRespBufType = CIFS_SMALL_BUFFER;
- iov[0].iov_len = receive_len + 4;
-
- dump_smb(midQ->resp_buf, 80);
- /* convert the length into a more usable form */
- if ((receive_len > 24) &&
- (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
- SECMODE_SIGN_ENABLED))) {
- rc = cifs_verify_signature(midQ->resp_buf,
- ses->server,
- midQ->sequence_number+1);
- if (rc) {
- cERROR(1, "Unexpected SMB signature");
- /* BB FIXME add code to kill session */
- }
- }
-
- /* BB special case reconnect tid and uid here? */
- rc = map_smb_to_linux_error(midQ->resp_buf,
- flags & CIFS_LOG_ERROR);
+ iov[0].iov_base = (char *)midQ->resp_buf;
+ iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
+ if (midQ->largeBuf)
+ *pRespBufType = CIFS_LARGE_BUFFER;
+ else
+ *pRespBufType = CIFS_SMALL_BUFFER;
- if ((flags & CIFS_NO_RESP) == 0)
- midQ->resp_buf = NULL; /* mark it so buf will
- not be freed by
- delete_mid */
- } else {
- rc = -EIO;
- cFYI(1, "Bad MID state?");
- }
+ rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
+ /* mark it so buf will not be freed by delete_mid */
+ if ((flags & CIFS_NO_RESP) == 0)
+ midQ->resp_buf = NULL;
out:
delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
@@ -662,12 +645,11 @@ out:
}
int
-SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
+SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
int *pbytes_returned, const int long_op)
{
int rc = 0;
- unsigned int receive_len;
struct mid_q_entry *midQ;
if (ses == NULL) {
@@ -750,54 +732,23 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
spin_unlock(&GlobalMid_Lock);
}
- rc = sync_mid_result(midQ, ses->server);
+ rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
- receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
-
- if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, "Frame too large received. Length: %d Xid: %d",
- receive_len, xid);
- rc = -EIO;
- goto out;
- }
-
- /* rcvd frame is ok */
-
- if (midQ->resp_buf && out_buf
- && (midQ->midState == MID_RESPONSE_RECEIVED)) {
- out_buf->smb_buf_length = cpu_to_be32(receive_len);
- memcpy((char *)out_buf + 4,
- (char *)midQ->resp_buf + 4,
- receive_len);
-
- dump_smb(out_buf, 92);
- /* convert the length into a more usable form */
- if ((receive_len > 24) &&
- (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
- SECMODE_SIGN_ENABLED))) {
- rc = cifs_verify_signature(out_buf,
- ses->server,
- midQ->sequence_number+1);
- if (rc) {
- cERROR(1, "Unexpected SMB signature");
- /* BB FIXME add code to kill session */
- }
- }
-
- *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length);
-
- /* BB special case reconnect tid and uid here? */
- rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
- } else {
+ if (!midQ->resp_buf || !out_buf ||
+ midQ->midState != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cERROR(1, "Bad MID state?");
+ goto out;
}
+ *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+ memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+ rc = cifs_check_receive(midQ, ses->server, 0);
out:
delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
@@ -810,12 +761,12 @@ out:
blocking lock to return. */
static int
-send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
+send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_hdr *in_buf,
struct smb_hdr *out_buf)
{
int bytes_returned;
- struct cifsSesInfo *ses = tcon->ses;
+ struct cifs_ses *ses = tcon->ses;
LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
/* We just modify the current in_buf to change
@@ -832,15 +783,14 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
}
int
-SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
+SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
int *pbytes_returned)
{
int rc = 0;
int rstart = 0;
- unsigned int receive_len;
struct mid_q_entry *midQ;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
if (tcon == NULL || tcon->ses == NULL) {
cERROR(1, "Null smb session");
@@ -957,50 +907,20 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
rstart = 1;
}
- rc = sync_mid_result(midQ, ses->server);
+ rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0)
return rc;
- receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
- if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, "Frame too large received. Length: %d Xid: %d",
- receive_len, xid);
- rc = -EIO;
- goto out;
- }
-
/* rcvd frame is ok */
-
- if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) {
+ if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cERROR(1, "Bad MID state?");
goto out;
}
- out_buf->smb_buf_length = cpu_to_be32(receive_len);
- memcpy((char *)out_buf + 4,
- (char *)midQ->resp_buf + 4,
- receive_len);
-
- dump_smb(out_buf, 92);
- /* convert the length into a more usable form */
- if ((receive_len > 24) &&
- (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
- SECMODE_SIGN_ENABLED))) {
- rc = cifs_verify_signature(out_buf,
- ses->server,
- midQ->sequence_number+1);
- if (rc) {
- cERROR(1, "Unexpected SMB signature");
- /* BB FIXME add code to kill session */
- }
- }
-
- *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length);
-
- /* BB special case reconnect tid and uid here? */
- rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
-
+ *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+ memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+ rc = cifs_check_receive(midQ, ses->server, 0);
out:
delete_mid(midQ);
if (rstart && rc == -EACCES)
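
The transport.c changes switch cifs_call_async() to a kvec-based send with per-request signing, and fold the duplicated response validation (dump, signature check, SMB-to-errno mapping) from SendReceive(), SendReceive2() and SendReceiveBlockingLock() into a shared cifs_check_receive(). cifs_sync_mid_result() is reduced to a mid-state switch, with socket shutdown now reported as a distinct MID_SHUTDOWN state. A minimal sketch of that state-to-errno mapping, using stand-in enum values:

/*
 * Sketch only: the state names come from the diff, the enum values and
 * the helper are stand-ins for illustration.
 */
#include <errno.h>
#include <stdio.h>

enum mid_state_stub {
        MID_RESPONSE_RECEIVED,
        MID_RETRY_NEEDED,
        MID_RESPONSE_MALFORMED,
        MID_SHUTDOWN,
        MID_OTHER,
};

static int sync_mid_result_stub(enum mid_state_stub state)
{
        switch (state) {
        case MID_RESPONSE_RECEIVED:
                return 0;               /* caller goes on to check the reply */
        case MID_RETRY_NEEDED:
                return -EAGAIN;         /* transient, resend the request */
        case MID_RESPONSE_MALFORMED:
                return -EIO;
        case MID_SHUTDOWN:
                return -EHOSTDOWN;      /* socket is going away */
        default:
                return -EIO;            /* invalid state, log and give up */
        }
}

int main(void)
{
        printf("retry    -> %d\n", sync_mid_result_stub(MID_RETRY_NEEDED));
        printf("shutdown -> %d\n", sync_mid_result_stub(MID_SHUTDOWN));
        return 0;
}
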
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 912995e013ec..2a22fb2989e4 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -49,7 +49,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path = NULL;
@@ -109,7 +109,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
struct cifs_ntsd *pacl;
@@ -240,7 +240,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
@@ -372,7 +372,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index a46126fd5735..2b8dae4d121e 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -336,8 +336,6 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
int len = de->d_name.len;
int error;
- dentry_unhash(de);
-
error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
if (!error) {
/* VFS may delete the child */
@@ -361,9 +359,6 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
int new_length = new_dentry->d_name.len;
int error;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
coda_i2f(new_dir), old_length, new_length,
(const char *) old_name, (const char *)new_name);
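The coda hunks above follow a pattern that repeats through most of this diff: ->rmdir and ->rename implementations drop their dentry_unhash() calls because these filesystems do not depend on unhash-before-delete behaviour, leaving dentry management to the VFS. A hypothetical minimal ->rmdir after such a cleanup; the examplefs_* helpers are assumptions for illustration, not taken from any hunk:

static int examplefs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	/* no dentry_unhash(dentry) here any more */
	if (!examplefs_dir_is_empty(inode))          /* assumed helper */
		return -ENOTEMPTY;
	return examplefs_delete_entry(dir, dentry);  /* assumed helper */
}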
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 9d17d350abc5..9a37a9b6de3a 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1359,8 +1359,6 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;
- dentry_unhash(dentry);
-
if (dentry->d_parent == configfs_sb->s_root)
return -EPERM;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 227b409b8406..94ab3c06317a 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -521,14 +521,14 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
struct dentry *lower_dir_dentry;
int rc;
- dentry_unhash(dentry);
-
lower_dentry = ecryptfs_dentry_to_lower(dentry);
dget(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
dget(lower_dentry);
rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
dput(lower_dentry);
+ if (!rc && dentry->d_inode)
+ clear_nlink(dentry->d_inode);
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
unlock_dir(lower_dir_dentry);
@@ -573,9 +573,6 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dir_dentry;
struct dentry *trap = NULL;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
dget(lower_old_dentry);
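In the ecryptfs rmdir hunk above, the dentry_unhash() call goes away and a clear_nlink() on the upper inode is added once the lower filesystem has removed the directory, so the VFS sees the stacked inode as deleted. A hypothetical, heavily simplified stacked-filesystem rmdir with that shape (stackfs_lower_dentry() is an assumed helper, and the locking/refcounting the real code does is omitted):

static int stackfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct dentry *lower = stackfs_lower_dentry(dentry);  /* assumed helper */
	int rc;

	rc = vfs_rmdir(lower->d_parent->d_inode, lower);
	if (!rc && dentry->d_inode)
		clear_nlink(dentry->d_inode);  /* mirror the lower deletion upward */
	return rc;
}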
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 03e609c45012..27a7fefb83eb 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -599,8 +599,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
struct mutex *tfm_mutex;
char *block_aligned_filename;
struct ecryptfs_auth_tok *auth_tok;
- struct scatterlist src_sg;
- struct scatterlist dst_sg;
+ struct scatterlist src_sg[2];
+ struct scatterlist dst_sg[2];
struct blkcipher_desc desc;
char iv[ECRYPTFS_MAX_IV_BYTES];
char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
@@ -816,23 +816,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
filename_size);
rc = virt_to_scatterlist(s->block_aligned_filename,
- s->block_aligned_filename_size, &s->src_sg, 1);
- if (rc != 1) {
+ s->block_aligned_filename_size, s->src_sg, 2);
+ if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
- "convert filename memory to scatterlist; "
- "expected rc = 1; got rc = [%d]. "
+ "convert filename memory to scatterlist; rc = [%d]. "
"block_aligned_filename_size = [%zd]\n", __func__, rc,
s->block_aligned_filename_size);
goto out_release_free_unlock;
}
rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
- &s->dst_sg, 1);
- if (rc != 1) {
+ s->dst_sg, 2);
+ if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert encrypted filename memory to scatterlist; "
- "expected rc = 1; got rc = [%d]. "
- "block_aligned_filename_size = [%zd]\n", __func__, rc,
- s->block_aligned_filename_size);
+ "rc = [%d]. block_aligned_filename_size = [%zd]\n",
+ __func__, rc, s->block_aligned_filename_size);
goto out_release_free_unlock;
}
/* The characters in the first block effectively do the job
@@ -855,7 +853,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
goto out_release_free_unlock;
}
- rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
+ rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
s->block_aligned_filename_size);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt filename; "
@@ -891,8 +889,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
struct mutex *tfm_mutex;
char *decrypted_filename;
struct ecryptfs_auth_tok *auth_tok;
- struct scatterlist src_sg;
- struct scatterlist dst_sg;
+ struct scatterlist src_sg[2];
+ struct scatterlist dst_sg[2];
struct blkcipher_desc desc;
char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
char iv[ECRYPTFS_MAX_IV_BYTES];
@@ -1008,13 +1006,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
}
mutex_lock(s->tfm_mutex);
rc = virt_to_scatterlist(&data[(*packet_size)],
- s->block_aligned_filename_size, &s->src_sg, 1);
- if (rc != 1) {
+ s->block_aligned_filename_size, s->src_sg, 2);
+ if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert encrypted filename memory to scatterlist; "
- "expected rc = 1; got rc = [%d]. "
- "block_aligned_filename_size = [%zd]\n", __func__, rc,
- s->block_aligned_filename_size);
+ "rc = [%d]. block_aligned_filename_size = [%zd]\n",
+ __func__, rc, s->block_aligned_filename_size);
goto out_unlock;
}
(*packet_size) += s->block_aligned_filename_size;
@@ -1028,13 +1025,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
goto out_unlock;
}
rc = virt_to_scatterlist(s->decrypted_filename,
- s->block_aligned_filename_size, &s->dst_sg, 1);
- if (rc != 1) {
+ s->block_aligned_filename_size, s->dst_sg, 2);
+ if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert decrypted filename memory to scatterlist; "
- "expected rc = 1; got rc = [%d]. "
- "block_aligned_filename_size = [%zd]\n", __func__, rc,
- s->block_aligned_filename_size);
+ "rc = [%d]. block_aligned_filename_size = [%zd]\n",
+ __func__, rc, s->block_aligned_filename_size);
goto out_free_unlock;
}
/* The characters in the first block effectively do the job of
@@ -1065,7 +1061,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
goto out_free_unlock;
}
- rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
+ rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
s->block_aligned_filename_size);
if (rc) {
printk(KERN_ERR "%s: Error attempting to decrypt filename; "
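The keystore.c changes above exist because the filename buffers handed to virt_to_scatterlist() can straddle a page boundary, in which case a single scatterlist entry is not enough; passing a two-entry array and accepting any positive return handles both cases. A small user-space sketch of the page-crossing arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096UL

static unsigned sg_entries_needed(uintptr_t addr, size_t len)
{
	uintptr_t first = addr / PAGE_SZ;
	uintptr_t last  = (addr + len - 1) / PAGE_SZ;

	return (unsigned)(last - first + 1);
}

int main(void)
{
	/* A 600-byte block starting 100 bytes before a page boundary spans
	 * two pages, so virt_to_scatterlist() would fill two entries. */
	printf("%u\n", sg_entries_needed(PAGE_SZ - 100, 600));  /* prints 2 */
	return 0;
}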
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 68b2e43d7c35..3451d23c3bae 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3392,7 +3392,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
*/
-void ext3_dirty_inode(struct inode *inode)
+void ext3_dirty_inode(struct inode *inode, int flags)
{
handle_t *current_handle = ext3_journal_current_handle();
handle_t *handle;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a74b89c09f90..1921392cd708 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1813,7 +1813,7 @@ extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
extern void ext4_evict_inode(struct inode *);
extern void ext4_clear_inode(struct inode *);
extern int ext4_sync_inode(handle_t *, struct inode *);
-extern void ext4_dirty_inode(struct inode *);
+extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
extern int ext4_can_truncate(struct inode *inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 50d0e9c64584..a5763e3505ba 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5733,7 +5733,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
*/
-void ext4_dirty_inode(struct inode *inode)
+void ext4_dirty_inode(struct inode *inode, int flags)
{
handle_t *handle;
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index be15437c272e..3b222dafd15b 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -326,8 +326,6 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
struct fat_slot_info sinfo;
int err;
- dentry_unhash(dentry);
-
lock_super(sb);
/*
* Check whether the directory is not in use, then check
@@ -459,9 +457,6 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
err = fat_scan(old_dir, old_name, &old_sinfo);
if (err) {
err = -EIO;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c61a6789f36c..20b4ea53fdc4 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -824,8 +824,6 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
struct fat_slot_info sinfo;
int err;
- dentry_unhash(dentry);
-
lock_super(sb);
err = fat_dir_empty(inode);
@@ -933,9 +931,6 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
int err, is_dir, update_dotdot, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 34591ee804b5..0f015a0468de 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1007,9 +1007,6 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
* In short, make sure you hash any inodes _before_ you start marking
* them dirty.
*
- * This function *must* be atomic for the I_DIRTY_PAGES case -
- * set_page_dirty() is called under spinlock in several places.
- *
* Note that for blockdevs, inode->dirtied_when represents the dirtying time of
* the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
* the kernel-internal blockdev inode represents the dirtying time of the
@@ -1028,7 +1025,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
if (sb->s_op->dirty_inode)
- sb->s_op->dirty_inode(inode);
+ sb->s_op->dirty_inode(inode, flags);
}
/*
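The ext3/ext4 prototype changes earlier in this diff and the __mark_inode_dirty() hunk just above (plus matching hunks for jffs2, jfs, nilfs2, reiserfs, ubifs and xfs below) extend ->dirty_inode with the I_DIRTY_* flags it was dirtied with, so an implementation can tell timestamp-only dirtying from real metadata changes. A hypothetical implementation of the new signature; the skip-on-timestamp policy is an assumption about how the flags might be used, not taken from this patch:

static void examplefs_dirty_inode(struct inode *inode, int flags)
{
	if (!(flags & I_DIRTY_DATASYNC))
		return;                    /* only timestamps changed: nothing to log */
	examplefs_log_inode(inode);        /* assumed helper */
}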
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0d0e3faddcfa..d50160714595 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -667,8 +667,6 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
if (IS_ERR(req))
return PTR_ERR(req);
- dentry_unhash(entry);
-
req->in.h.opcode = FUSE_RMDIR;
req->in.h.nodeid = get_node_id(dir);
req->in.numargs = 1;
@@ -694,9 +692,6 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
struct fuse_conn *fc = get_fuse_conn(olddir);
struct fuse_req *req = fuse_get_req(fc);
- if (newent->d_inode && S_ISDIR(newent->d_inode->i_mode))
- dentry_unhash(newent);
-
if (IS_ERR(req))
return PTR_ERR(req);
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 1cb70cdba2c1..b4d70b13be92 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -253,9 +253,6 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int res;
- if (S_ISDIR(inode->i_mode))
- dentry_unhash(dentry);
-
if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
return -ENOTEMPTY;
res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
@@ -286,9 +283,6 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Unlink destination if it already exists */
if (new_dentry->d_inode) {
- if (S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
res = hfs_remove(new_dir, new_dentry);
if (res)
return res;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index b28835091dd0..4df5059c25da 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -370,8 +370,6 @@ static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int res;
- dentry_unhash(dentry);
-
if (inode->i_size != 2)
return -ENOTEMPTY;
@@ -469,12 +467,10 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Unlink destination if it already exists */
if (new_dentry->d_inode) {
- if (S_ISDIR(new_dentry->d_inode->i_mode)) {
- dentry_unhash(new_dentry);
+ if (S_ISDIR(new_dentry->d_inode->i_mode))
res = hfsplus_rmdir(new_dir, new_dentry);
- } else {
+ else
res = hfsplus_unlink(new_dir, new_dentry);
- }
if (res)
return res;
}
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index e6816b9e6903..2638c834ed28 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -683,8 +683,6 @@ int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
char *file;
int err;
- dentry_unhash(dentry);
-
if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = do_rmdir(file);
@@ -738,9 +736,6 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from,
char *from_name, *to_name;
int err;
- if (to->d_inode && S_ISDIR(to->d_inode->i_mode))
- dentry_unhash(to);
-
if ((from_name = dentry_name(from)) == NULL)
return -ENOMEM;
if ((to_name = dentry_name(to)) == NULL) {
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index ff0ce21c0867..acf95dab2aac 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -439,8 +439,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
int err;
int r;
- dentry_unhash(dentry);
-
hpfs_adjust_length(name, &len);
hpfs_lock(dir->i_sb);
err = -ENOENT;
@@ -535,9 +533,6 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct fnode *fnode;
int err;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
if ((err = hpfs_chk_name(new_name, &new_len))) return err;
err = 0;
hpfs_adjust_length(old_name, &old_len);
diff --git a/fs/inode.c b/fs/inode.c
index 990d284877a1..0f7e88a7803f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1,9 +1,7 @@
/*
- * linux/fs/inode.c
- *
* (C) 1997 Linus Torvalds
+ * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
*/
-
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
@@ -27,10 +25,11 @@
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
+#include <linux/buffer_head.h> /* for inode_has_buffers */
#include "internal.h"
/*
- * inode locking rules.
+ * Inode locking rules:
*
* inode->i_lock protects:
* inode->i_state, inode->i_hash, __iget()
@@ -60,54 +59,11 @@
* inode_hash_lock
*/
-/*
- * This is needed for the following functions:
- * - inode_has_buffers
- * - invalidate_bdev
- *
- * FIXME: remove all knowledge of the buffer layer from this file
- */
-#include <linux/buffer_head.h>
-
-/*
- * New inode.c implementation.
- *
- * This implementation has the basic premise of trying
- * to be extremely low-overhead and SMP-safe, yet be
- * simple enough to be "obviously correct".
- *
- * Famous last words.
- */
-
-/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */
-
-/* #define INODE_PARANOIA 1 */
-/* #define INODE_DEBUG 1 */
-
-/*
- * Inode lookup is no longer as critical as it used to be:
- * most of the lookups are going to be through the dcache.
- */
-#define I_HASHBITS i_hash_shift
-#define I_HASHMASK i_hash_mask
-
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
-/*
- * Each inode can be on two separate lists. One is
- * the hash list of the inode, used for lookups. The
- * other linked list is the "type" list:
- * "in_use" - valid inode, i_count > 0, i_nlink > 0
- * "dirty" - as "in_use" but also dirty
- * "unused" - valid inode, i_count = 0
- *
- * A "dirty" list is maintained for each super block,
- * allowing for low-overhead inode sync() operations.
- */
-
static LIST_HEAD(inode_lru);
static DEFINE_SPINLOCK(inode_lru_lock);
@@ -424,8 +380,8 @@ static unsigned long hash(struct super_block *sb, unsigned long hashval)
tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
L1_CACHE_BYTES;
- tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
- return tmp & I_HASHMASK;
+ tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
+ return tmp & i_hash_mask;
}
/**
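The fs/inode.c hunk above drops the I_HASHBITS/I_HASHMASK aliases and uses i_hash_shift/i_hash_mask directly; the hash itself is unchanged. A user-space rendering of that hash, with GOLDEN_RATIO_PRIME and the cache line size filled in with common 64-bit values purely for illustration:

#include <stdio.h>

#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL   /* 64-bit value, assumed */
#define L1_CACHE_BYTES 64UL

static unsigned int i_hash_shift = 16;            /* 2^16 hash buckets, example */
static unsigned int i_hash_mask  = (1U << 16) - 1;

static unsigned long inode_hash(unsigned long sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

int main(void)
{
	printf("%lu\n", inode_hash(0x1234567890abUL, 42));
	return 0;
}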
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 05f73328b28b..4bca6a2e5c07 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -75,7 +75,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
struct nameidata *nd)
{
struct jffs2_inode_info *dir_f;
- struct jffs2_sb_info *c;
struct jffs2_full_dirent *fd = NULL, *fd_list;
uint32_t ino = 0;
struct inode *inode = NULL;
@@ -86,7 +85,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
return ERR_PTR(-ENAMETOOLONG);
dir_f = JFFS2_INODE_INFO(dir_i);
- c = JFFS2_SB_INFO(dir_i->i_sb);
mutex_lock(&dir_f->sem);
@@ -119,7 +117,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct jffs2_inode_info *f;
- struct jffs2_sb_info *c;
struct inode *inode = filp->f_path.dentry->d_inode;
struct jffs2_full_dirent *fd;
unsigned long offset, curofs;
@@ -127,7 +124,6 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino));
f = JFFS2_INODE_INFO(inode);
- c = JFFS2_SB_INFO(inode->i_sb);
offset = filp->f_pos;
@@ -609,8 +605,6 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
int ret;
uint32_t now = get_seconds();
- dentry_unhash(dentry);
-
for (fd = f->dents ; fd; fd = fd->next) {
if (fd->ino)
return -ENOTEMPTY;
@@ -786,9 +780,6 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
uint8_t type;
uint32_t now;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* The VFS will check for us and prevent trying to rename a
* file over a directory and vice versa, but if it's a directory,
* the VFS can't check whether the victim is empty. The filesystem
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index e896e67767eb..46ad619b6124 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -357,7 +357,7 @@ error:
return ERR_PTR(ret);
}
-void jffs2_dirty_inode(struct inode *inode)
+void jffs2_dirty_inode(struct inode *inode, int flags)
{
struct iattr iattr;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 00bae7cc2e48..65c6c43ca482 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -172,7 +172,7 @@ int jffs2_setattr (struct dentry *, struct iattr *);
int jffs2_do_setattr (struct inode *, struct iattr *);
struct inode *jffs2_iget(struct super_block *, unsigned long);
void jffs2_evict_inode (struct inode *);
-void jffs2_dirty_inode(struct inode *inode);
+void jffs2_dirty_inode(struct inode *inode, int flags);
struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
struct jffs2_raw_inode *ri);
int jffs2_statfs (struct dentry *, struct kstatfs *);
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index b632dddcb482..8d8cd3419d02 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -94,7 +94,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
uint32_t buf_size = 0;
struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
- size_t pointlen;
+ size_t pointlen, try_size;
if (c->mtd->point) {
ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen,
@@ -113,18 +113,21 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
/* For NAND it's quicker to read a whole eraseblock at a time,
apparently */
if (jffs2_cleanmarker_oob(c))
- buf_size = c->sector_size;
+ try_size = c->sector_size;
else
- buf_size = PAGE_SIZE;
+ try_size = PAGE_SIZE;
- /* Respect kmalloc limitations */
- if (buf_size > 128*1024)
- buf_size = 128*1024;
+ D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu "
+ "bytes\n", try_size));
- D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
- flashbuf = kmalloc(buf_size, GFP_KERNEL);
+ flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
if (!flashbuf)
return -ENOMEM;
+
+ D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n",
+ try_size));
+
+ buf_size = (uint32_t)try_size;
}
if (jffs2_sum_active()) {
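Instead of clamping the scan buffer at 128 KiB, the hunk above asks mtd_kmalloc_up_to() for a whole eraseblock (or page) and lets the helper fall back to whatever smaller allocation succeeds, reporting the obtained size back through try_size. A user-space sketch of the fall-back idea; the real helper also aligns the size to the MTD write size, which is omitted here:

#include <stdlib.h>

static void *alloc_up_to(size_t *size, size_t min_size)
{
	while (*size >= min_size) {
		void *buf = malloc(*size);

		if (buf)
			return buf;     /* *size now holds what was actually obtained */
		*size /= 2;             /* shrink and retry */
	}
	return NULL;
}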
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index eddbb373209e..109655904bbc 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -173,7 +173,7 @@ void jfs_evict_inode(struct inode *inode)
dquot_drop(inode);
}
-void jfs_dirty_inode(struct inode *inode)
+void jfs_dirty_inode(struct inode *inode, int flags)
{
static int noisy = 5;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 155e91eff07d..ec2fb8b945fc 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -28,7 +28,7 @@ extern struct inode *jfs_iget(struct super_block *, unsigned long);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_write_inode(struct inode *, struct writeback_control *);
extern void jfs_evict_inode(struct inode *);
-extern void jfs_dirty_inode(struct inode *);
+extern void jfs_dirty_inode(struct inode *, int);
extern void jfs_truncate(struct inode *);
extern void jfs_truncate_nolock(struct inode *, loff_t);
extern void jfs_free_zero_link(struct inode *);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 865df16a6cf3..eaaf2b511e89 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -360,8 +360,6 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
- dentry_unhash(dentry);
-
/* Init inode for quota operations. */
dquot_initialize(dip);
dquot_initialize(ip);
@@ -1097,9 +1095,6 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
jfs_info("jfs_rename: %s %s", old_dentry->d_name.name,
new_dentry->d_name.name);
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
dquot_initialize(old_dir);
dquot_initialize(new_dir);
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index f34c9cde9e94..9ed89d1663f8 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -273,8 +273,6 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- dentry_unhash(dentry);
-
if (!logfs_empty_dir(inode))
return -ENOTEMPTY;
@@ -624,9 +622,6 @@ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
loff_t pos;
int err;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* 1. locate source dd */
err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
if (err)
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index f60aed8db9c4..6e6777f1b4b2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -168,8 +168,6 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
struct inode * inode = dentry->d_inode;
int err = -ENOTEMPTY;
- dentry_unhash(dentry);
-
if (minix_empty_dir(inode)) {
err = minix_unlink(dir, dentry);
if (!err) {
@@ -192,9 +190,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
struct minix_dir_entry * old_de;
int err = -ENOENT;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_de = minix_find_entry(old_dentry, &old_page);
if (!old_de)
goto out;
diff --git a/fs/namei.c b/fs/namei.c
index 2358b326b221..1ab641f2e78e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -919,12 +919,11 @@ static inline bool managed_dentry_might_block(struct dentry *dentry)
}
/*
- * Skip to top of mountpoint pile in rcuwalk mode. We abort the rcu-walk if we
- * meet a managed dentry and we're not walking to "..". True is returned to
- * continue, false to abort.
+ * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
+ * we meet a managed dentry that would need blocking.
*/
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
- struct inode **inode, bool reverse_transit)
+ struct inode **inode)
{
for (;;) {
struct vfsmount *mounted;
@@ -933,8 +932,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
* that wants to block transit.
*/
*inode = path->dentry->d_inode;
- if (!reverse_transit &&
- unlikely(managed_dentry_might_block(path->dentry)))
+ if (unlikely(managed_dentry_might_block(path->dentry)))
return false;
if (!d_mountpoint(path->dentry))
@@ -947,16 +945,24 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
path->dentry = mounted->mnt_root;
nd->seq = read_seqcount_begin(&path->dentry->d_seq);
}
-
- if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
- return reverse_transit;
return true;
}
-static int follow_dotdot_rcu(struct nameidata *nd)
+static void follow_mount_rcu(struct nameidata *nd)
{
- struct inode *inode = nd->inode;
+ while (d_mountpoint(nd->path.dentry)) {
+ struct vfsmount *mounted;
+ mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
+ if (!mounted)
+ break;
+ nd->path.mnt = mounted;
+ nd->path.dentry = mounted->mnt_root;
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+ }
+}
+static int follow_dotdot_rcu(struct nameidata *nd)
+{
set_root_rcu(nd);
while (1) {
@@ -972,7 +978,6 @@ static int follow_dotdot_rcu(struct nameidata *nd)
seq = read_seqcount_begin(&parent->d_seq);
if (read_seqcount_retry(&old->d_seq, nd->seq))
goto failed;
- inode = parent->d_inode;
nd->path.dentry = parent;
nd->seq = seq;
break;
@@ -980,10 +985,9 @@ static int follow_dotdot_rcu(struct nameidata *nd)
if (!follow_up_rcu(&nd->path))
break;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
- inode = nd->path.dentry->d_inode;
}
- __follow_mount_rcu(nd, &nd->path, &inode, true);
- nd->inode = inode;
+ follow_mount_rcu(nd);
+ nd->inode = nd->path.dentry->d_inode;
return 0;
failed:
@@ -1157,8 +1161,11 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
}
path->mnt = mnt;
path->dentry = dentry;
- if (likely(__follow_mount_rcu(nd, path, inode, false)))
- return 0;
+ if (unlikely(!__follow_mount_rcu(nd, path, inode)))
+ goto unlazy;
+ if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+ goto unlazy;
+ return 0;
unlazy:
if (unlazy_walk(nd, dentry))
return -ECHILD;
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index e3e646b06404..9c51f621e901 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1033,8 +1033,11 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
DPRINTK("ncp_rmdir: removing %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
+ /*
+ * fail with EBUSY if there are still references to this
+ * directory.
+ */
dentry_unhash(dentry);
-
error = -EBUSY;
if (!d_unhashed(dentry))
goto out;
@@ -1141,8 +1144,16 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+ if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) {
+ /*
+ * fail with EBUSY if there are still references to this
+ * directory.
+ */
dentry_unhash(new_dentry);
+ error = -EBUSY;
+ if (!d_unhashed(new_dentry))
+ goto out;
+ }
ncp_age_dentry(server, old_dentry);
ncp_age_dentry(server, new_dentry);
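Unlike the other filesystems in this diff, ncpfs keeps dentry_unhash() because it uses the result as a busy check: dentry_unhash() only drops a dentry whose reference count shows no other users, so a dentry that is still hashed afterwards is in use and the operation fails with -EBUSY. The hunks above extend that same check to the rename path. A hypothetical helper condensing the idiom:

static int examplefs_check_dir_busy(struct dentry *dentry)
{
	dentry_unhash(dentry);          /* drops the dentry only if it is unused */
	if (!d_unhashed(dentry))
		return -EBUSY;          /* someone still holds a reference */
	return 0;
}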
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 587f18432832..b954878ad6ce 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -917,7 +917,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
* construction. This function can be called both as a single operation
* and as a part of indivisible file operations.
*/
-void nilfs_dirty_inode(struct inode *inode)
+void nilfs_dirty_inode(struct inode *inode, int flags)
{
struct nilfs_transaction_info ti;
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 1102a5fbb744..546849b3e88f 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -334,8 +334,6 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
struct nilfs_transaction_info ti;
int err;
- dentry_unhash(dentry);
-
err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
if (err)
return err;
@@ -371,9 +369,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct nilfs_transaction_info ti;
int err;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
if (unlikely(err))
return err;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index a9c6a531f80c..f02b9ad43a21 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -269,7 +269,7 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh);
extern int nilfs_inode_dirty(struct inode *);
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty);
extern int nilfs_mark_inode_dirty(struct inode *);
-extern void nilfs_dirty_inode(struct inode *);
+extern void nilfs_dirty_inode(struct inode *, int flags);
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 4c5488468c14..cd9427023d2e 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -368,7 +368,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
int *vict_bit,
struct buffer_head **ret_bh)
{
- int ret, i, blocks_per_unit = 1;
+ int ret, i, bits_per_unit = 0;
u64 blkno;
char namebuf[40];
@@ -398,14 +398,14 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
rec = &(cl->cl_recs[0]);
if (type == GLOBAL_BITMAP_SYSTEM_INODE)
- blocks_per_unit <<= (osb->s_clustersize_bits -
- inode->i_sb->s_blocksize_bits);
+ bits_per_unit = osb->s_clustersize_bits -
+ inode->i_sb->s_blocksize_bits;
/*
* 'vict_blkno' was out of the valid range.
*/
if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
- (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) *
- blocks_per_unit))) {
+ (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
+ bits_per_unit))) {
ret = -EINVAL;
goto out;
}
@@ -441,8 +441,8 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
le16_to_cpu(bg->bg_bits))) {
*ret_bh = gd_bh;
- *vict_bit = (vict_blkno - blkno) /
- blocks_per_unit;
+ *vict_bit = (vict_blkno - blkno) >>
+ bits_per_unit;
mlog(0, "find the victim group: #%llu, "
"total_bits: %u, vict_bit: %u\n",
blkno, le16_to_cpu(bg->bg_bits),
@@ -472,12 +472,24 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
int ret, goal_bit = 0;
struct buffer_head *gd_bh = NULL;
- struct ocfs2_group_desc *bg;
+ struct ocfs2_group_desc *bg = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int c_to_b = 1 << (osb->s_clustersize_bits -
inode->i_sb->s_blocksize_bits);
/*
+ * make goal become cluster aligned.
+ */
+ range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
+ range->me_goal);
+ /*
+ * moving goal is not allowd to start with a group desc blok(#0 blk)
+ * let's compromise to the latter cluster.
+ */
+ if (range->me_goal == le64_to_cpu(bg->bg_blkno))
+ range->me_goal += c_to_b;
+
+ /*
* validate goal sits within global_bitmap, and return the victim
* group desc
*/
@@ -491,19 +503,6 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
bg = (struct ocfs2_group_desc *)gd_bh->b_data;
/*
- * make goal become cluster aligned.
- */
- if (range->me_goal % c_to_b)
- range->me_goal = range->me_goal / c_to_b * c_to_b;
-
- /*
- * moving goal is not allowd to start with a group desc blok(#0 blk)
- * let's compromise to the latter cluster.
- */
- if (range->me_goal == le64_to_cpu(bg->bg_blkno))
- range->me_goal += c_to_b;
-
- /*
* movement is not gonna cross two groups.
*/
if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
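The move_extents.c hunk above swaps blocks_per_unit multiplications and divisions for shifts by bits_per_unit (clustersize bits minus blocksize bits), which keeps the cluster/block conversion exact while avoiding 64-bit division. A user-space sketch of the arithmetic with example sizes (1 MiB clusters, 4 KiB blocks):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int clustersize_bits = 20;             /* 1 MiB clusters (example) */
	int blocksize_bits   = 12;             /* 4 KiB blocks (example) */
	int bits_per_unit    = clustersize_bits - blocksize_bits;

	uint64_t total_clusters = 1000;
	uint64_t group_blkno    = 98304, vict_blkno = 123456;

	/* was: total_clusters * blocks_per_unit */
	uint64_t total_blocks = total_clusters << bits_per_unit;
	/* was: (vict_blkno - group_blkno) / blocks_per_unit */
	uint64_t vict_bit = (vict_blkno - group_blkno) >> bits_per_unit;

	printf("%llu %llu\n", (unsigned long long)total_blocks,
	       (unsigned long long)vict_bit);   /* prints 256000 98 */
	return 0;
}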
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index c368360c35a1..3b8d3979e03b 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -241,11 +241,9 @@ static int omfs_remove(struct inode *dir, struct dentry *dentry)
int ret;
- if (S_ISDIR(inode->i_mode)) {
- dentry_unhash(dentry);
- if (!omfs_dir_is_empty(inode))
- return -ENOTEMPTY;
- }
+ if (S_ISDIR(inode->i_mode) &&
+ !omfs_dir_is_empty(inode))
+ return -ENOTEMPTY;
ret = omfs_delete_entry(dentry);
if (ret)
@@ -382,9 +380,6 @@ static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
int err;
if (new_inode) {
- if (S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* overwriting existing file/dir */
err = omfs_remove(new_dir, new_dentry);
if (err)
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 8ed4d3433199..f82e762eeca2 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -256,10 +256,12 @@ ssize_t part_discard_alignment_show(struct device *dev,
{
struct hd_struct *p = dev_to_part(dev);
struct gendisk *disk = dev_to_disk(dev);
+ unsigned int alignment = 0;
- return sprintf(buf, "%u\n",
- queue_limit_discard_alignment(&disk->queue->limits,
- p->start_sect));
+ if (disk->queue)
+ alignment = queue_limit_discard_alignment(&disk->queue->limits,
+ p->start_sect);
+ return sprintf(buf, "%u\n", alignment);
}
ssize_t part_stat_show(struct device *dev,
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 76c8164d5651..118662690cdf 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -831,8 +831,6 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
INITIALIZE_PATH(path);
struct reiserfs_dir_entry de;
- dentry_unhash(dentry);
-
/* we will be doing 2 balancings and update 2 stat data, we change quotas
* of the owner of the directory and of the owner of the parent directory.
* The quota structure is possibly deleted only on last iput => outside
@@ -1227,9 +1225,6 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned long savelink = 1;
struct timespec ctime;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* three balancings: (1) old name removal, (2) new name insertion
and (3) maybe "save" link insertion
stat data updates: (1) old directory,
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b216ff6be1c9..aa91089162cb 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -568,7 +568,7 @@ static void destroy_inodecache(void)
}
/* we don't mark inodes dirty, we just log them */
-static void reiserfs_dirty_inode(struct inode *inode)
+static void reiserfs_dirty_inode(struct inode *inode, int flags)
{
struct reiserfs_transaction_handle th;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 50f1abccd1cd..e8a62f41b458 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -98,7 +98,6 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
I_MUTEX_CHILD, dir->i_sb);
- dentry_unhash(dentry);
error = dir->i_op->rmdir(dir, dentry);
if (!error)
dentry->d_inode->i_flags |= S_DEAD;
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index e2cc6756f3b1..e474fbcf8bde 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -196,8 +196,6 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
struct inode *inode = dentry->d_inode;
int err = -ENOTEMPTY;
- dentry_unhash(dentry);
-
if (sysv_empty_dir(inode)) {
err = sysv_unlink(dir, dentry);
if (!err) {
@@ -224,9 +222,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
struct sysv_dir_entry * old_de;
int err = -ENOENT;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_de = sysv_find_entry(old_dentry, &old_page);
if (!old_de)
goto out;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index c2b80943560d..ef5abd38f0bf 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -656,8 +656,6 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
struct ubifs_inode *dir_ui = ubifs_inode(dir);
struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
- dentry_unhash(dentry);
-
/*
* Budget request settings: deletion direntry, deletion inode and
* changing the parent inode. If budgeting fails, go ahead anyway
@@ -978,9 +976,6 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
struct timespec time;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
/*
* Budget request settings: deletion direntry, new direntry, removing
* the old inode, and changing old and new parent directory inodes.
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 6db0bdaa9f74..1ab0d22e4c94 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -382,7 +382,7 @@ done:
end_writeback(inode);
}
-static void ubifs_dirty_inode(struct inode *inode)
+static void ubifs_dirty_inode(struct inode *inode, int flags)
{
struct ubifs_inode *ui = ubifs_inode(inode);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 4d76594c2a8f..f1dce848ef96 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -783,8 +783,6 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
struct fileIdentDesc *fi, cfi;
struct kernel_lb_addr tloc;
- dentry_unhash(dentry);
-
retval = -ENOENT;
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
if (!fi)
@@ -1083,9 +1081,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
struct kernel_lb_addr tloc;
struct udf_inode_info *old_iinfo = UDF_I(old_inode);
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
if (ofi) {
if (ofibh.sbh != ofibh.ebh)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 953ebdfc5bf7..29309e25417f 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -258,8 +258,6 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
struct inode * inode = dentry->d_inode;
int err= -ENOTEMPTY;
- dentry_unhash(dentry);
-
lock_ufs(dir->i_sb);
if (ufs_empty_dir (inode)) {
err = ufs_unlink(dir, dentry);
@@ -284,9 +282,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ufs_dir_entry *old_de;
int err = -ENOENT;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;
diff --git a/fs/xattr.c b/fs/xattr.c
index f1ef94974dea..f060663ab70c 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -46,18 +46,22 @@ xattr_permission(struct inode *inode, const char *name, int mask)
return 0;
/*
- * The trusted.* namespace can only be accessed by a privileged user.
+ * The trusted.* namespace can only be accessed by privileged users.
*/
- if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
- return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
+ if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
+ return 0;
+ }
- /* In user.* namespace, only regular files and directories can have
+ /*
+ * In the user.* namespace, only regular files and directories can have
* extended attributes. For sticky directories, only the owner and
- * privileged user can write attributes.
+ * privileged users can write attributes.
*/
if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -EPERM;
+ return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
(mask & MAY_WRITE) && !inode_owner_or_capable(inode))
return -EPERM;
@@ -87,7 +91,11 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
{
struct inode *inode = dentry->d_inode;
int error = -EOPNOTSUPP;
+ int issec = !strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN);
+ if (issec)
+ inode->i_flags &= ~S_NOSEC;
if (inode->i_op->setxattr) {
error = inode->i_op->setxattr(dentry, name, value, size, flags);
if (!error) {
@@ -95,8 +103,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
security_inode_post_setxattr(dentry, name, value,
size, flags);
}
- } else if (!strncmp(name, XATTR_SECURITY_PREFIX,
- XATTR_SECURITY_PREFIX_LEN)) {
+ } else if (issec) {
const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
error = security_inode_setsecurity(inode, suffix, value,
size, flags);
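Two user-visible effects of the xattr.c hunks above: unprivileged reads of trusted.* attributes (and user.* reads on unsupported file types) now report -ENODATA rather than -EPERM, and setting a security.* attribute clears the new S_NOSEC fast-path flag on the inode. A small user-space check of the first effect; the path and attribute name are only examples:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[64];
	ssize_t n = getxattr("/etc/hostname", "trusted.example", buf, sizeof(buf));

	if (n < 0)   /* expected to print ENODATA with this patch, EPERM before */
		printf("getxattr: %s\n", strerror(errno));
	return 0;
}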
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 98b9c91fcdf1..1e3a7ce804dc 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -925,7 +925,8 @@ xfs_fs_inode_init_once(
*/
STATIC void
xfs_fs_dirty_inode(
- struct inode *inode)
+ struct inode *inode,
+ int flags)
{
barrier();
XFS_I(inode)->i_update_core = 1;
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 91784841e407..dfb0ec666c94 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -162,46 +162,6 @@ extern void warn_slowpath_null(const char *file, const int line);
unlikely(__ret_warn_once); \
})
-#ifdef CONFIG_PRINTK
-
-#define WARN_ON_RATELIMIT(condition, state) \
- WARN_ON((condition) && __ratelimit(state))
-
-#define __WARN_RATELIMIT(condition, state, format...) \
-({ \
- int rtn = 0; \
- if (unlikely(__ratelimit(state))) \
- rtn = WARN(condition, format); \
- rtn; \
-})
-
-#define WARN_RATELIMIT(condition, format...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- __WARN_RATELIMIT(condition, &_rs, format); \
-})
-
-#else
-
-#define WARN_ON_RATELIMIT(condition, state) \
- WARN_ON(condition)
-
-#define __WARN_RATELIMIT(condition, state, format...) \
-({ \
- int rtn = WARN(condition, format); \
- rtn; \
-})
-
-#define WARN_RATELIMIT(condition, format...) \
-({ \
- int rtn = WARN(condition, format); \
- rtn; \
-})
-
-#endif
-
/*
* WARN_ON_SMP() is for cases that the warning is either
* meaningless for !SMP or may even cause failures.
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index ff5c66080c8c..fcdcb5d5c995 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -35,9 +35,9 @@
* platform data and other tables.
*/
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
- return ((unsigned)number) < ARCH_NR_GPIOS;
+ return number >= 0 && number < ARCH_NR_GPIOS;
}
struct device;
@@ -193,8 +193,8 @@ struct gpio {
};
extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-extern int gpio_request_array(struct gpio *array, size_t num);
-extern void gpio_free_array(struct gpio *array, size_t num);
+extern int gpio_request_array(const struct gpio *array, size_t num);
+extern void gpio_free_array(const struct gpio *array, size_t num);
#ifdef CONFIG_GPIO_SYSFS
@@ -212,7 +212,7 @@ extern void gpio_unexport(unsigned gpio);
#else /* !CONFIG_GPIOLIB */
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
return number >= 0;
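The gpio_is_valid() changes above are behaviour-preserving: the old unsigned-cast comparison already rejected negative numbers via wraparound, and the new form states the range check directly and returns bool. A user-space comparison of the two, with ARCH_NR_GPIOS set to an example value:

#include <stdbool.h>
#include <stdio.h>

#define ARCH_NR_GPIOS 256

static bool gpio_is_valid_old(int number)
{
	return ((unsigned)number) < ARCH_NR_GPIOS;   /* negatives wrap to huge values */
}

static bool gpio_is_valid_new(int number)
{
	return number >= 0 && number < ARCH_NR_GPIOS;
}

int main(void)
{
	printf("%d %d\n", gpio_is_valid_old(-1), gpio_is_valid_new(-1));   /* 0 0 */
	printf("%d %d\n", gpio_is_valid_old(255), gpio_is_valid_new(255)); /* 1 1 */
	return 0;
}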
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 33d524704883..ae90e0f63995 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -681,9 +681,11 @@ __SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_setns 268
+__SYSCALL(__NR_setns, sys_setns)
#undef __NR_syscalls
-#define __NR_syscalls 268
+#define __NR_syscalls 269
/*
* All syscalls below here should go away really,
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 96c038e43d66..ee456c79b0e6 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -34,4 +34,17 @@ static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
}
#endif
+#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
+static inline void atomic_or(int i, atomic_t *v)
+{
+ int old;
+ int new;
+
+ do {
+ old = atomic_read(v);
+ new = old | i;
+ } while (atomic_cmpxchg(v, old, new) != old);
+}
+#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
+
#endif /* _LINUX_ATOMIC_H */
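Architectures without a native atomic OR get the compare-and-exchange loop added above: read the current value, OR in the new bits, and retry until no other writer slipped in between. A user-space analog of the same loop written with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static void atomic_or_fallback(int i, _Atomic int *v)
{
	int old = atomic_load(v);

	/* retry until the value we read is still the value we replace */
	while (!atomic_compare_exchange_weak(v, &old, old | i))
		;   /* 'old' is reloaded by the failed compare-exchange */
}

int main(void)
{
	_Atomic int flags = 0x1;

	atomic_or_fallback(0x4, &flags);
	printf("0x%x\n", atomic_load(&flags));   /* prints 0x5 */
	return 0;
}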
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index f20eb8f16025..e9eaec522655 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -146,7 +146,7 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
- cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+ do_set_cpus_allowed(p, cpu_possible_mask);
return cpumask_any(cpu_active_mask);
}
diff --git a/include/linux/cred.h b/include/linux/cred.h
index be16b61283cc..82607992f308 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,4 +1,4 @@
-/* Credentials management - see Documentation/credentials.txt
+/* Credentials management - see Documentation/security/credentials.txt
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 6998d9376ef9..4bfe0a2f7d50 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -3,6 +3,7 @@
* AVR32 systems.)
*
* Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 33fa1203024e..e376270cd26e 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -299,6 +299,7 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern unsigned long efi_get_time(void);
extern int efi_set_rtc_mmss(unsigned long nowtime);
+extern void efi_reserve_boot_services(void);
extern struct efi_memory_map memmap;
/**
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 85c1d302c12e..5e06acf95d0f 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -909,7 +909,7 @@ extern int ext3_setattr (struct dentry *, struct iattr *);
extern void ext3_evict_inode (struct inode *);
extern int ext3_sync_inode (handle_t *, struct inode *);
extern void ext3_discard_reservation (struct inode *);
-extern void ext3_dirty_inode(struct inode *);
+extern void ext3_dirty_inode(struct inode *, int);
extern int ext3_change_inode_journal_flag(struct inode *, int);
extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
extern int ext3_can_truncate(struct inode *inode);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 241609346dfb..c55d6b7cd5d6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -237,6 +237,7 @@ struct inodes_stat_t {
#define S_PRIVATE 512 /* Inode is fs-internal */
#define S_IMA 1024 /* Inode has an associated IMA struct */
#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
+#define S_NOSEC 4096 /* no suid or xattr security attributes */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -273,6 +274,7 @@ struct inodes_stat_t {
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
+#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
/* the read-only stuff doesn't really belong here, but any other place is
probably as bad and I don't want to create yet another include file. */
@@ -1618,7 +1620,7 @@ struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*dirty_inode) (struct inode *);
+ void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
@@ -2582,5 +2584,16 @@ int __init get_filesystem_list(char *buf);
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
(flag & __FMODE_NONOTIFY)))
+static inline int is_sxid(mode_t mode)
+{
+ return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+}
+
+static inline void inode_has_no_xattr(struct inode *inode)
+{
+ if (!is_sxid(inode->i_mode))
+ inode->i_flags |= S_NOSEC;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
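The fs.h additions above introduce S_NOSEC, a per-inode fast-path flag meaning no setuid/setgid bit and no security xattr needs clearing on write, and inode_has_no_xattr() lets a filesystem set it when the mode carries no sxid bits. A user-space check of the is_sxid() predicate itself; setgid without group-execute is excluded because that combination denotes mandatory locking rather than a privilege bit:

#include <stdio.h>
#include <sys/stat.h>

static int is_sxid(mode_t mode)
{
	return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}

int main(void)
{
	printf("%d %d %d\n",
	       is_sxid(S_IFREG | 04755),   /* setuid binary        -> 1 */
	       is_sxid(S_IFDIR | 02770),   /* setgid dir, g+x      -> 1 */
	       is_sxid(S_IFREG | 02660));  /* setgid, no g+x       -> 0 */
	return 0;
}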
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b5a550a39a70..59d3ef100eb9 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -16,6 +16,11 @@ struct trace_print_flags {
const char *name;
};
+struct trace_print_flags_u64 {
+ unsigned long long mask;
+ const char *name;
+};
+
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
const struct trace_print_flags *flag_array);
@@ -23,6 +28,13 @@ const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
const struct trace_print_flags *symbol_array);
+#if BITS_PER_LONG == 32
+const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
+ unsigned long long val,
+ const struct trace_print_flags_u64
+ *symbol_array);
+#endif
+
const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 32720baf70f1..32d47e710661 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -25,9 +25,9 @@ struct gpio_chip;
* warning when something is wrongly called.
*/
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
- return 0;
+ return false;
}
static inline int gpio_request(unsigned gpio, const char *label)
@@ -41,7 +41,7 @@ static inline int gpio_request_one(unsigned gpio,
return -ENOSYS;
}
-static inline int gpio_request_array(struct gpio *array, size_t num)
+static inline int gpio_request_array(const struct gpio *array, size_t num)
{
return -ENOSYS;
}
@@ -54,7 +54,7 @@ static inline void gpio_free(unsigned gpio)
WARN_ON(1);
}
-static inline void gpio_free_array(struct gpio *array, size_t num)
+static inline void gpio_free_array(const struct gpio *array, size_t num)
{
might_sleep();
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 0f1325d98295..0065ffd3226b 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -132,10 +132,6 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-#ifdef CONFIG_SYSCTL
-extern struct ctl_table ether_table[];
-#endif
-
int mac_pton(const char *s, u8 *mac);
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/key.h b/include/linux/key.h
index ef19b99aff98..6ea4eebd3467 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*
*
- * See Documentation/keys.txt for information on keys/keyrings.
+ * See Documentation/security/keys.txt for information on keys/keyrings.
*/
#ifndef _LINUX_KEY_H
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 69d1010e2e51..5ff2400ad46c 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -311,10 +311,6 @@ enum max8997_irq {
MAX8997_IRQ_NR,
};
-#define MAX8997_REG_BUCK1DVS(x) (MAX8997_REG_BUCK1DVS1 + (x) - 1)
-#define MAX8997_REG_BUCK2DVS(x) (MAX8997_REG_BUCK2DVS1 + (x) - 1)
-#define MAX8997_REG_BUCK5DVS(x) (MAX8997_REG_BUCK5DVS1 + (x) - 1)
-
#define MAX8997_NUM_GPIO 12
struct max8997_dev {
struct device *dev;
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
new file mode 100644
index 000000000000..8bb85b930c07
--- /dev/null
+++ b/include/linux/mfd/tps65910.h
@@ -0,0 +1,800 @@
+/*
+ * tps65910.h -- TI TPS6591x
+ *
+ * Copyright 2010-2011 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ * Author: Arnaud Deconinck <a-deconinck@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS65910_H
+#define __LINUX_MFD_TPS65910_H
+
+/* TPS chip id list */
+#define TPS65910 0
+#define TPS65911 1
+
+/* TPS regulator type list */
+#define REGULATOR_LDO 0
+#define REGULATOR_DCDC 1
+
+/*
+ * List of registers for component TPS65910
+ *
+ */
+
+#define TPS65910_SECONDS 0x0
+#define TPS65910_MINUTES 0x1
+#define TPS65910_HOURS 0x2
+#define TPS65910_DAYS 0x3
+#define TPS65910_MONTHS 0x4
+#define TPS65910_YEARS 0x5
+#define TPS65910_WEEKS 0x6
+#define TPS65910_ALARM_SECONDS 0x8
+#define TPS65910_ALARM_MINUTES 0x9
+#define TPS65910_ALARM_HOURS 0xA
+#define TPS65910_ALARM_DAYS 0xB
+#define TPS65910_ALARM_MONTHS 0xC
+#define TPS65910_ALARM_YEARS 0xD
+#define TPS65910_RTC_CTRL 0x10
+#define TPS65910_RTC_STATUS 0x11
+#define TPS65910_RTC_INTERRUPTS 0x12
+#define TPS65910_RTC_COMP_LSB 0x13
+#define TPS65910_RTC_COMP_MSB 0x14
+#define TPS65910_RTC_RES_PROG 0x15
+#define TPS65910_RTC_RESET_STATUS 0x16
+#define TPS65910_BCK1 0x17
+#define TPS65910_BCK2 0x18
+#define TPS65910_BCK3 0x19
+#define TPS65910_BCK4 0x1A
+#define TPS65910_BCK5 0x1B
+#define TPS65910_PUADEN 0x1C
+#define TPS65910_REF 0x1D
+#define TPS65910_VRTC 0x1E
+#define TPS65910_VIO 0x20
+#define TPS65910_VDD1 0x21
+#define TPS65910_VDD1_OP 0x22
+#define TPS65910_VDD1_SR 0x23
+#define TPS65910_VDD2 0x24
+#define TPS65910_VDD2_OP 0x25
+#define TPS65910_VDD2_SR 0x26
+#define TPS65910_VDD3 0x27
+#define TPS65910_VDIG1 0x30
+#define TPS65910_VDIG2 0x31
+#define TPS65910_VAUX1 0x32
+#define TPS65910_VAUX2 0x33
+#define TPS65910_VAUX33 0x34
+#define TPS65910_VMMC 0x35
+#define TPS65910_VPLL 0x36
+#define TPS65910_VDAC 0x37
+#define TPS65910_THERM 0x38
+#define TPS65910_BBCH 0x39
+#define TPS65910_DCDCCTRL 0x3E
+#define TPS65910_DEVCTRL 0x3F
+#define TPS65910_DEVCTRL2 0x40
+#define TPS65910_SLEEP_KEEP_LDO_ON 0x41
+#define TPS65910_SLEEP_KEEP_RES_ON 0x42
+#define TPS65910_SLEEP_SET_LDO_OFF 0x43
+#define TPS65910_SLEEP_SET_RES_OFF 0x44
+#define TPS65910_EN1_LDO_ASS 0x45
+#define TPS65910_EN1_SMPS_ASS 0x46
+#define TPS65910_EN2_LDO_ASS 0x47
+#define TPS65910_EN2_SMPS_ASS 0x48
+#define TPS65910_EN3_LDO_ASS 0x49
+#define TPS65910_SPARE 0x4A
+#define TPS65910_INT_STS 0x50
+#define TPS65910_INT_MSK 0x51
+#define TPS65910_INT_STS2 0x52
+#define TPS65910_INT_MSK2 0x53
+#define TPS65910_INT_STS3 0x54
+#define TPS65910_INT_MSK3 0x55
+#define TPS65910_GPIO0 0x60
+#define TPS65910_GPIO1 0x61
+#define TPS65910_GPIO2 0x62
+#define TPS65910_GPIO3 0x63
+#define TPS65910_GPIO4 0x64
+#define TPS65910_GPIO5 0x65
+#define TPS65910_GPIO6 0x66
+#define TPS65910_GPIO7 0x67
+#define TPS65910_GPIO8 0x68
+#define TPS65910_JTAGVERNUM 0x80
+#define TPS65910_MAX_REGISTER 0x80
+
+/*
+ * List of registers specific to TPS65911
+ */
+#define TPS65911_VDDCTRL 0x27
+#define TPS65911_VDDCTRL_OP 0x28
+#define TPS65911_VDDCTRL_SR 0x29
+#define TPS65911_LDO1 0x30
+#define TPS65911_LDO2 0x31
+#define TPS65911_LDO5 0x32
+#define TPS65911_LDO8 0x33
+#define TPS65911_LDO7 0x34
+#define TPS65911_LDO6 0x35
+#define TPS65911_LDO4 0x36
+#define TPS65911_LDO3 0x37
+#define TPS65911_VMBCH 0x6A
+#define TPS65911_VMBCH2 0x6B
+
+/*
+ * List of register bitfields for component TPS65910
+ *
+ */
+
+
+/*Register BCK1 (0x80) register.RegisterDescription */
+#define BCK1_BCKUP_MASK 0xFF
+#define BCK1_BCKUP_SHIFT 0
+
+
+/* Register BCK2 (0x18) bit definitions */
+#define BCK2_BCKUP_MASK 0xFF
+#define BCK2_BCKUP_SHIFT 0
+
+
+/* Register BCK3 (0x19) bit definitions */
+#define BCK3_BCKUP_MASK 0xFF
+#define BCK3_BCKUP_SHIFT 0
+
+
+/* Register BCK4 (0x1A) bit definitions */
+#define BCK4_BCKUP_MASK 0xFF
+#define BCK4_BCKUP_SHIFT 0
+
+
+/* Register BCK5 (0x1B) bit definitions */
+#define BCK5_BCKUP_MASK 0xFF
+#define BCK5_BCKUP_SHIFT 0
+
+
+/* Register PUADEN (0x1C) bit definitions */
+#define PUADEN_EN3P_MASK 0x80
+#define PUADEN_EN3P_SHIFT 7
+#define PUADEN_I2CCTLP_MASK 0x40
+#define PUADEN_I2CCTLP_SHIFT 6
+#define PUADEN_I2CSRP_MASK 0x20
+#define PUADEN_I2CSRP_SHIFT 5
+#define PUADEN_PWRONP_MASK 0x10
+#define PUADEN_PWRONP_SHIFT 4
+#define PUADEN_SLEEPP_MASK 0x08
+#define PUADEN_SLEEPP_SHIFT 3
+#define PUADEN_PWRHOLDP_MASK 0x04
+#define PUADEN_PWRHOLDP_SHIFT 2
+#define PUADEN_BOOT1P_MASK 0x02
+#define PUADEN_BOOT1P_SHIFT 1
+#define PUADEN_BOOT0P_MASK 0x01
+#define PUADEN_BOOT0P_SHIFT 0
+
+
+/* Register REF (0x1D) bit definitions */
+#define REF_VMBCH_SEL_MASK 0x0C
+#define REF_VMBCH_SEL_SHIFT 2
+#define REF_ST_MASK 0x03
+#define REF_ST_SHIFT 0
+
+
+/* Register VRTC (0x1E) bit definitions */
+#define VRTC_VRTC_OFFMASK_MASK 0x08
+#define VRTC_VRTC_OFFMASK_SHIFT 3
+#define VRTC_ST_MASK 0x03
+#define VRTC_ST_SHIFT 0
+
+
+/* Register VIO (0x20) bit definitions */
+#define VIO_ILMAX_MASK 0xC0
+#define VIO_ILMAX_SHIFT 6
+#define VIO_SEL_MASK 0x0C
+#define VIO_SEL_SHIFT 2
+#define VIO_ST_MASK 0x03
+#define VIO_ST_SHIFT 0
+
+
+/* Register VDD1 (0x21) bit definitions */
+#define VDD1_VGAIN_SEL_MASK 0xC0
+#define VDD1_VGAIN_SEL_SHIFT 6
+#define VDD1_ILMAX_MASK 0x20
+#define VDD1_ILMAX_SHIFT 5
+#define VDD1_TSTEP_MASK 0x1C
+#define VDD1_TSTEP_SHIFT 2
+#define VDD1_ST_MASK 0x03
+#define VDD1_ST_SHIFT 0
+
+
+/* Register VDD1_OP (0x22) bit definitions */
+#define VDD1_OP_CMD_MASK 0x80
+#define VDD1_OP_CMD_SHIFT 7
+#define VDD1_OP_SEL_MASK 0x7F
+#define VDD1_OP_SEL_SHIFT 0
+
+
+/* Register VDD1_SR (0x23) bit definitions */
+#define VDD1_SR_SEL_MASK 0x7F
+#define VDD1_SR_SEL_SHIFT 0
+
+
+/* Register VDD2 (0x24) bit definitions */
+#define VDD2_VGAIN_SEL_MASK 0xC0
+#define VDD2_VGAIN_SEL_SHIFT 6
+#define VDD2_ILMAX_MASK 0x20
+#define VDD2_ILMAX_SHIFT 5
+#define VDD2_TSTEP_MASK 0x1C
+#define VDD2_TSTEP_SHIFT 2
+#define VDD2_ST_MASK 0x03
+#define VDD2_ST_SHIFT 0
+
+
+/* Register VDD2_OP (0x25) bit definitions */
+#define VDD2_OP_CMD_MASK 0x80
+#define VDD2_OP_CMD_SHIFT 7
+#define VDD2_OP_SEL_MASK 0x7F
+#define VDD2_OP_SEL_SHIFT 0
+
+/* Register VDD2_SR (0x26) bit definitions */
+#define VDD2_SR_SEL_MASK 0x7F
+#define VDD2_SR_SEL_SHIFT 0
+
+
+/* Registers VDD1, VDD2: voltage value definitions */
+#define VDD1_2_NUM_VOLTS 73
+#define VDD1_2_MIN_VOLT 6000
+#define VDD1_2_OFFSET 125
+
+
+/* Register VDD3 (0x27) bit definitions */
+#define VDD3_CKINEN_MASK 0x04
+#define VDD3_CKINEN_SHIFT 2
+#define VDD3_ST_MASK 0x03
+#define VDD3_ST_SHIFT 0
+#define VDDCTRL_MIN_VOLT 6000
+#define VDDCTRL_OFFSET 125
+
+/* Registers VDIG1 (0x30) to VDAC (0x37): common LDO bit definitions */
+#define LDO_SEL_MASK 0x0C
+#define LDO_SEL_SHIFT 2
+#define LDO_ST_MASK 0x03
+#define LDO_ST_SHIFT 0
+#define LDO_ST_ON_BIT 0x01
+#define LDO_ST_MODE_BIT 0x02
+
+
+/* Registers LDO1 to LDO8 (0x30 to 0x37) in TPS65911 */
+#define LDO1_SEL_MASK 0xFC
+#define LDO3_SEL_MASK 0x7C
+#define LDO_MIN_VOLT 1000
+#define LDO_MAX_VOLT 3300
+
+
+/* Register VDIG1 (0x30) bit definitions */
+#define VDIG1_SEL_MASK 0x0C
+#define VDIG1_SEL_SHIFT 2
+#define VDIG1_ST_MASK 0x03
+#define VDIG1_ST_SHIFT 0
+
+
+/* Register VDIG2 (0x31) bit definitions */
+#define VDIG2_SEL_MASK 0x0C
+#define VDIG2_SEL_SHIFT 2
+#define VDIG2_ST_MASK 0x03
+#define VDIG2_ST_SHIFT 0
+
+
+/* Register VAUX1 (0x32) bit definitions */
+#define VAUX1_SEL_MASK 0x0C
+#define VAUX1_SEL_SHIFT 2
+#define VAUX1_ST_MASK 0x03
+#define VAUX1_ST_SHIFT 0
+
+
+/* Register VAUX2 (0x33) bit definitions */
+#define VAUX2_SEL_MASK 0x0C
+#define VAUX2_SEL_SHIFT 2
+#define VAUX2_ST_MASK 0x03
+#define VAUX2_ST_SHIFT 0
+
+
+/* Register VAUX33 (0x34) bit definitions */
+#define VAUX33_SEL_MASK 0x0C
+#define VAUX33_SEL_SHIFT 2
+#define VAUX33_ST_MASK 0x03
+#define VAUX33_ST_SHIFT 0
+
+
+/* Register VMMC (0x35) bit definitions */
+#define VMMC_SEL_MASK 0x0C
+#define VMMC_SEL_SHIFT 2
+#define VMMC_ST_MASK 0x03
+#define VMMC_ST_SHIFT 0
+
+
+/* Register VPLL (0x36) bit definitions */
+#define VPLL_SEL_MASK 0x0C
+#define VPLL_SEL_SHIFT 2
+#define VPLL_ST_MASK 0x03
+#define VPLL_ST_SHIFT 0
+
+
+/* Register VDAC (0x37) bit definitions */
+#define VDAC_SEL_MASK 0x0C
+#define VDAC_SEL_SHIFT 2
+#define VDAC_ST_MASK 0x03
+#define VDAC_ST_SHIFT 0
+
+
+/* Register THERM (0x38) bit definitions */
+#define THERM_THERM_HD_MASK 0x20
+#define THERM_THERM_HD_SHIFT 5
+#define THERM_THERM_TS_MASK 0x10
+#define THERM_THERM_TS_SHIFT 4
+#define THERM_THERM_HDSEL_MASK 0x0C
+#define THERM_THERM_HDSEL_SHIFT 2
+#define THERM_RSVD1_MASK 0x02
+#define THERM_RSVD1_SHIFT 1
+#define THERM_THERM_STATE_MASK 0x01
+#define THERM_THERM_STATE_SHIFT 0
+
+
+/* Register BBCH (0x39) bit definitions */
+#define BBCH_BBSEL_MASK 0x06
+#define BBCH_BBSEL_SHIFT 1
+#define BBCH_BBCHEN_MASK 0x01
+#define BBCH_BBCHEN_SHIFT 0
+
+
+/* Register DCDCCTRL (0x3E) bit definitions */
+#define DCDCCTRL_VDD2_PSKIP_MASK 0x20
+#define DCDCCTRL_VDD2_PSKIP_SHIFT 5
+#define DCDCCTRL_VDD1_PSKIP_MASK 0x10
+#define DCDCCTRL_VDD1_PSKIP_SHIFT 4
+#define DCDCCTRL_VIO_PSKIP_MASK 0x08
+#define DCDCCTRL_VIO_PSKIP_SHIFT 3
+#define DCDCCTRL_DCDCCKEXT_MASK 0x04
+#define DCDCCTRL_DCDCCKEXT_SHIFT 2
+#define DCDCCTRL_DCDCCKSYNC_MASK 0x03
+#define DCDCCTRL_DCDCCKSYNC_SHIFT 0
+
+
+/* Register DEVCTRL (0x3F) bit definitions */
+#define DEVCTRL_RTC_PWDN_MASK 0x40
+#define DEVCTRL_RTC_PWDN_SHIFT 6
+#define DEVCTRL_CK32K_CTRL_MASK 0x20
+#define DEVCTRL_CK32K_CTRL_SHIFT 5
+#define DEVCTRL_SR_CTL_I2C_SEL_MASK 0x10
+#define DEVCTRL_SR_CTL_I2C_SEL_SHIFT 4
+#define DEVCTRL_DEV_OFF_RST_MASK 0x08
+#define DEVCTRL_DEV_OFF_RST_SHIFT 3
+#define DEVCTRL_DEV_ON_MASK 0x04
+#define DEVCTRL_DEV_ON_SHIFT 2
+#define DEVCTRL_DEV_SLP_MASK 0x02
+#define DEVCTRL_DEV_SLP_SHIFT 1
+#define DEVCTRL_DEV_OFF_MASK 0x01
+#define DEVCTRL_DEV_OFF_SHIFT 0
+
+
+/* Register DEVCTRL2 (0x40) bit definitions */
+#define DEVCTRL2_TSLOT_LENGTH_MASK 0x30
+#define DEVCTRL2_TSLOT_LENGTH_SHIFT 4
+#define DEVCTRL2_SLEEPSIG_POL_MASK 0x08
+#define DEVCTRL2_SLEEPSIG_POL_SHIFT 3
+#define DEVCTRL2_PWON_LP_OFF_MASK 0x04
+#define DEVCTRL2_PWON_LP_OFF_SHIFT 2
+#define DEVCTRL2_PWON_LP_RST_MASK 0x02
+#define DEVCTRL2_PWON_LP_RST_SHIFT 1
+#define DEVCTRL2_IT_POL_MASK 0x01
+#define DEVCTRL2_IT_POL_SHIFT 0
+
+
+/* Register SLEEP_KEEP_LDO_ON (0x41) bit definitions */
+#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK 0x80
+#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT 7
+#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK 0x40
+#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT 6
+#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK 0x20
+#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT 5
+#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK 0x10
+#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT 4
+#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK 0x08
+#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT 3
+#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK 0x04
+#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT 2
+#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK 0x02
+#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT 1
+#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK 0x01
+#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT 0
+
+
+/* Register SLEEP_KEEP_RES_ON (0x42) bit definitions */
+#define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK 0x80
+#define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT 7
+#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK 0x40
+#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT 6
+#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK 0x20
+#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT 5
+#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK 0x10
+#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT 4
+#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK 0x08
+#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT 3
+#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK 0x04
+#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT 2
+#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK 0x02
+#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT 1
+#define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK 0x01
+#define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT 0
+
+
+/* Register SLEEP_SET_LDO_OFF (0x43) bit definitions */
+#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK 0x80
+#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT 7
+#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK 0x40
+#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT 6
+#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK 0x20
+#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT 5
+#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK 0x10
+#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT 4
+#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK 0x08
+#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT 3
+#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK 0x04
+#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT 2
+#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK 0x02
+#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT 1
+#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK 0x01
+#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT 0
+
+
+/* Register SLEEP_SET_RES_OFF (0x44) bit definitions */
+#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK 0x80
+#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT 7
+#define SLEEP_SET_RES_OFF_RSVD_MASK 0x60
+#define SLEEP_SET_RES_OFF_RSVD_SHIFT 5
+#define SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK 0x10
+#define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT 4
+#define SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK 0x08
+#define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT 3
+#define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK 0x04
+#define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT 2
+#define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK 0x02
+#define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT 1
+#define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK 0x01
+#define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT 0
+
+
+/* Register EN1_LDO_ASS (0x45) bit definitions */
+#define EN1_LDO_ASS_VDAC_EN1_MASK 0x80
+#define EN1_LDO_ASS_VDAC_EN1_SHIFT 7
+#define EN1_LDO_ASS_VPLL_EN1_MASK 0x40
+#define EN1_LDO_ASS_VPLL_EN1_SHIFT 6
+#define EN1_LDO_ASS_VAUX33_EN1_MASK 0x20
+#define EN1_LDO_ASS_VAUX33_EN1_SHIFT 5
+#define EN1_LDO_ASS_VAUX2_EN1_MASK 0x10
+#define EN1_LDO_ASS_VAUX2_EN1_SHIFT 4
+#define EN1_LDO_ASS_VAUX1_EN1_MASK 0x08
+#define EN1_LDO_ASS_VAUX1_EN1_SHIFT 3
+#define EN1_LDO_ASS_VDIG2_EN1_MASK 0x04
+#define EN1_LDO_ASS_VDIG2_EN1_SHIFT 2
+#define EN1_LDO_ASS_VDIG1_EN1_MASK 0x02
+#define EN1_LDO_ASS_VDIG1_EN1_SHIFT 1
+#define EN1_LDO_ASS_VMMC_EN1_MASK 0x01
+#define EN1_LDO_ASS_VMMC_EN1_SHIFT 0
+
+
+/* Register EN1_SMPS_ASS (0x46) bit definitions */
+#define EN1_SMPS_ASS_RSVD_MASK 0xE0
+#define EN1_SMPS_ASS_RSVD_SHIFT 5
+#define EN1_SMPS_ASS_SPARE_EN1_MASK 0x10
+#define EN1_SMPS_ASS_SPARE_EN1_SHIFT 4
+#define EN1_SMPS_ASS_VDD3_EN1_MASK 0x08
+#define EN1_SMPS_ASS_VDD3_EN1_SHIFT 3
+#define EN1_SMPS_ASS_VDD2_EN1_MASK 0x04
+#define EN1_SMPS_ASS_VDD2_EN1_SHIFT 2
+#define EN1_SMPS_ASS_VDD1_EN1_MASK 0x02
+#define EN1_SMPS_ASS_VDD1_EN1_SHIFT 1
+#define EN1_SMPS_ASS_VIO_EN1_MASK 0x01
+#define EN1_SMPS_ASS_VIO_EN1_SHIFT 0
+
+
+/* Register EN2_LDO_ASS (0x47) bit definitions */
+#define EN2_LDO_ASS_VDAC_EN2_MASK 0x80
+#define EN2_LDO_ASS_VDAC_EN2_SHIFT 7
+#define EN2_LDO_ASS_VPLL_EN2_MASK 0x40
+#define EN2_LDO_ASS_VPLL_EN2_SHIFT 6
+#define EN2_LDO_ASS_VAUX33_EN2_MASK 0x20
+#define EN2_LDO_ASS_VAUX33_EN2_SHIFT 5
+#define EN2_LDO_ASS_VAUX2_EN2_MASK 0x10
+#define EN2_LDO_ASS_VAUX2_EN2_SHIFT 4
+#define EN2_LDO_ASS_VAUX1_EN2_MASK 0x08
+#define EN2_LDO_ASS_VAUX1_EN2_SHIFT 3
+#define EN2_LDO_ASS_VDIG2_EN2_MASK 0x04
+#define EN2_LDO_ASS_VDIG2_EN2_SHIFT 2
+#define EN2_LDO_ASS_VDIG1_EN2_MASK 0x02
+#define EN2_LDO_ASS_VDIG1_EN2_SHIFT 1
+#define EN2_LDO_ASS_VMMC_EN2_MASK 0x01
+#define EN2_LDO_ASS_VMMC_EN2_SHIFT 0
+
+
+/* Register EN2_SMPS_ASS (0x48) bit definitions */
+#define EN2_SMPS_ASS_RSVD_MASK 0xE0
+#define EN2_SMPS_ASS_RSVD_SHIFT 5
+#define EN2_SMPS_ASS_SPARE_EN2_MASK 0x10
+#define EN2_SMPS_ASS_SPARE_EN2_SHIFT 4
+#define EN2_SMPS_ASS_VDD3_EN2_MASK 0x08
+#define EN2_SMPS_ASS_VDD3_EN2_SHIFT 3
+#define EN2_SMPS_ASS_VDD2_EN2_MASK 0x04
+#define EN2_SMPS_ASS_VDD2_EN2_SHIFT 2
+#define EN2_SMPS_ASS_VDD1_EN2_MASK 0x02
+#define EN2_SMPS_ASS_VDD1_EN2_SHIFT 1
+#define EN2_SMPS_ASS_VIO_EN2_MASK 0x01
+#define EN2_SMPS_ASS_VIO_EN2_SHIFT 0
+
+
+/* Register EN3_LDO_ASS (0x49) bit definitions */
+#define EN3_LDO_ASS_VDAC_EN3_MASK 0x80
+#define EN3_LDO_ASS_VDAC_EN3_SHIFT 7
+#define EN3_LDO_ASS_VPLL_EN3_MASK 0x40
+#define EN3_LDO_ASS_VPLL_EN3_SHIFT 6
+#define EN3_LDO_ASS_VAUX33_EN3_MASK 0x20
+#define EN3_LDO_ASS_VAUX33_EN3_SHIFT 5
+#define EN3_LDO_ASS_VAUX2_EN3_MASK 0x10
+#define EN3_LDO_ASS_VAUX2_EN3_SHIFT 4
+#define EN3_LDO_ASS_VAUX1_EN3_MASK 0x08
+#define EN3_LDO_ASS_VAUX1_EN3_SHIFT 3
+#define EN3_LDO_ASS_VDIG2_EN3_MASK 0x04
+#define EN3_LDO_ASS_VDIG2_EN3_SHIFT 2
+#define EN3_LDO_ASS_VDIG1_EN3_MASK 0x02
+#define EN3_LDO_ASS_VDIG1_EN3_SHIFT 1
+#define EN3_LDO_ASS_VMMC_EN3_MASK 0x01
+#define EN3_LDO_ASS_VMMC_EN3_SHIFT 0
+
+
+/* Register SPARE (0x4A) bit definitions */
+#define SPARE_SPARE_MASK 0xFF
+#define SPARE_SPARE_SHIFT 0
+
+
+/* Register INT_STS (0x50) bit definitions */
+#define INT_STS_RTC_PERIOD_IT_MASK 0x80
+#define INT_STS_RTC_PERIOD_IT_SHIFT 7
+#define INT_STS_RTC_ALARM_IT_MASK 0x40
+#define INT_STS_RTC_ALARM_IT_SHIFT 6
+#define INT_STS_HOTDIE_IT_MASK 0x20
+#define INT_STS_HOTDIE_IT_SHIFT 5
+#define INT_STS_PWRHOLD_IT_MASK 0x10
+#define INT_STS_PWRHOLD_IT_SHIFT 4
+#define INT_STS_PWRON_LP_IT_MASK 0x08
+#define INT_STS_PWRON_LP_IT_SHIFT 3
+#define INT_STS_PWRON_IT_MASK 0x04
+#define INT_STS_PWRON_IT_SHIFT 2
+#define INT_STS_VMBHI_IT_MASK 0x02
+#define INT_STS_VMBHI_IT_SHIFT 1
+#define INT_STS_VMBDCH_IT_MASK 0x01
+#define INT_STS_VMBDCH_IT_SHIFT 0
+
+
+/* Register INT_MSK (0x51) bit definitions */
+#define INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80
+#define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7
+#define INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40
+#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6
+#define INT_MSK_HOTDIE_IT_MSK_MASK 0x20
+#define INT_MSK_HOTDIE_IT_MSK_SHIFT 5
+#define INT_MSK_PWRHOLD_IT_MSK_MASK 0x10
+#define INT_MSK_PWRHOLD_IT_MSK_SHIFT 4
+#define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08
+#define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3
+#define INT_MSK_PWRON_IT_MSK_MASK 0x04
+#define INT_MSK_PWRON_IT_MSK_SHIFT 2
+#define INT_MSK_VMBHI_IT_MSK_MASK 0x02
+#define INT_MSK_VMBHI_IT_MSK_SHIFT 1
+#define INT_MSK_VMBDCH_IT_MSK_MASK 0x01
+#define INT_MSK_VMBDCH_IT_MSK_SHIFT 0
+
+
+/* Register INT_STS2 (0x52) bit definitions */
+#define INT_STS2_GPIO3_F_IT_MASK 0x80
+#define INT_STS2_GPIO3_F_IT_SHIFT 7
+#define INT_STS2_GPIO3_R_IT_MASK 0x40
+#define INT_STS2_GPIO3_R_IT_SHIFT 6
+#define INT_STS2_GPIO2_F_IT_MASK 0x20
+#define INT_STS2_GPIO2_F_IT_SHIFT 5
+#define INT_STS2_GPIO2_R_IT_MASK 0x10
+#define INT_STS2_GPIO2_R_IT_SHIFT 4
+#define INT_STS2_GPIO1_F_IT_MASK 0x08
+#define INT_STS2_GPIO1_F_IT_SHIFT 3
+#define INT_STS2_GPIO1_R_IT_MASK 0x04
+#define INT_STS2_GPIO1_R_IT_SHIFT 2
+#define INT_STS2_GPIO0_F_IT_MASK 0x02
+#define INT_STS2_GPIO0_F_IT_SHIFT 1
+#define INT_STS2_GPIO0_R_IT_MASK 0x01
+#define INT_STS2_GPIO0_R_IT_SHIFT 0
+
+
+/* Register INT_MSK2 (0x53) bit definitions */
+#define INT_MSK2_GPIO3_F_IT_MSK_MASK 0x80
+#define INT_MSK2_GPIO3_F_IT_MSK_SHIFT 7
+#define INT_MSK2_GPIO3_R_IT_MSK_MASK 0x40
+#define INT_MSK2_GPIO3_R_IT_MSK_SHIFT 6
+#define INT_MSK2_GPIO2_F_IT_MSK_MASK 0x20
+#define INT_MSK2_GPIO2_F_IT_MSK_SHIFT 5
+#define INT_MSK2_GPIO2_R_IT_MSK_MASK 0x10
+#define INT_MSK2_GPIO2_R_IT_MSK_SHIFT 4
+#define INT_MSK2_GPIO1_F_IT_MSK_MASK 0x08
+#define INT_MSK2_GPIO1_F_IT_MSK_SHIFT 3
+#define INT_MSK2_GPIO1_R_IT_MSK_MASK 0x04
+#define INT_MSK2_GPIO1_R_IT_MSK_SHIFT 2
+#define INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02
+#define INT_MSK2_GPIO0_F_IT_MSK_SHIFT 1
+#define INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01
+#define INT_MSK2_GPIO0_R_IT_MSK_SHIFT 0
+
+
+/* Register INT_STS3 (0x54) bit definitions */
+#define INT_STS3_GPIO5_F_IT_MASK 0x08
+#define INT_STS3_GPIO5_F_IT_SHIFT 3
+#define INT_STS3_GPIO5_R_IT_MASK 0x04
+#define INT_STS3_GPIO5_R_IT_SHIFT 2
+#define INT_STS3_GPIO4_F_IT_MASK 0x02
+#define INT_STS3_GPIO4_F_IT_SHIFT 1
+#define INT_STS3_GPIO4_R_IT_MASK 0x01
+#define INT_STS3_GPIO4_R_IT_SHIFT 0
+
+
+/* Register INT_MSK3 (0x55) bit definitions */
+#define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08
+#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3
+#define INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04
+#define INT_MSK3_GPIO5_R_IT_MSK_SHIFT 2
+#define INT_MSK3_GPIO4_F_IT_MSK_MASK 0x02
+#define INT_MSK3_GPIO4_F_IT_MSK_SHIFT 1
+#define INT_MSK3_GPIO4_R_IT_MSK_MASK 0x01
+#define INT_MSK3_GPIO4_R_IT_MSK_SHIFT 0
+
+
+/* Registers GPIO0 to GPIO8 (0x60 to 0x68) bit definitions */
+#define GPIO_DEB_MASK 0x10
+#define GPIO_DEB_SHIFT 4
+#define GPIO_PUEN_MASK 0x08
+#define GPIO_PUEN_SHIFT 3
+#define GPIO_CFG_MASK 0x04
+#define GPIO_CFG_SHIFT 2
+#define GPIO_STS_MASK 0x02
+#define GPIO_STS_SHIFT 1
+#define GPIO_SET_MASK 0x01
+#define GPIO_SET_SHIFT 0
+
+
+/* Register JTAGVERNUM (0x80) bit definitions */
+#define JTAGVERNUM_VERNUM_MASK 0x0F
+#define JTAGVERNUM_VERNUM_SHIFT 0
+
+
+/* Register VDDCTRL (0x27) bit definitions */
+#define VDDCTRL_ST_MASK 0x03
+#define VDDCTRL_ST_SHIFT 0
+
+
+/* Register VDDCTRL_OP (0x28) bit definitions */
+#define VDDCTRL_OP_CMD_MASK 0x80
+#define VDDCTRL_OP_CMD_SHIFT 7
+#define VDDCTRL_OP_SEL_MASK 0x7F
+#define VDDCTRL_OP_SEL_SHIFT 0
+
+
+/* Register VDDCTRL_SR (0x29) bit definitions */
+#define VDDCTRL_SR_SEL_MASK 0x7F
+#define VDDCTRL_SR_SEL_SHIFT 0
+
+
+/* IRQ Definitions */
+#define TPS65910_IRQ_VBAT_VMBDCH 0
+#define TPS65910_IRQ_VBAT_VMHI 1
+#define TPS65910_IRQ_PWRON 2
+#define TPS65910_IRQ_PWRON_LP 3
+#define TPS65910_IRQ_PWRHOLD 4
+#define TPS65910_IRQ_HOTDIE 5
+#define TPS65910_IRQ_RTC_ALARM 6
+#define TPS65910_IRQ_RTC_PERIOD 7
+#define TPS65910_IRQ_GPIO_R 8
+#define TPS65910_IRQ_GPIO_F 9
+#define TPS65910_NUM_IRQ 10
+
+#define TPS65911_IRQ_VBAT_VMBDCH 0
+#define TPS65911_IRQ_VBAT_VMBDCH2L 1
+#define TPS65911_IRQ_VBAT_VMBDCH2H 2
+#define TPS65911_IRQ_VBAT_VMHI 3
+#define TPS65911_IRQ_PWRON 4
+#define TPS65911_IRQ_PWRON_LP 5
+#define TPS65911_IRQ_PWRHOLD_F 6
+#define TPS65911_IRQ_PWRHOLD_R 7
+#define TPS65911_IRQ_HOTDIE 8
+#define TPS65911_IRQ_RTC_ALARM 9
+#define TPS65911_IRQ_RTC_PERIOD 10
+#define TPS65911_IRQ_GPIO0_R 11
+#define TPS65911_IRQ_GPIO0_F 12
+#define TPS65911_IRQ_GPIO1_R 13
+#define TPS65911_IRQ_GPIO1_F 14
+#define TPS65911_IRQ_GPIO2_R 15
+#define TPS65911_IRQ_GPIO2_F 16
+#define TPS65911_IRQ_GPIO3_R 17
+#define TPS65911_IRQ_GPIO3_F 18
+#define TPS65911_IRQ_GPIO4_R 19
+#define TPS65911_IRQ_GPIO4_F 20
+#define TPS65911_IRQ_GPIO5_R 21
+#define TPS65911_IRQ_GPIO5_F 22
+#define TPS65911_IRQ_WTCHDG 23
+#define TPS65911_IRQ_PWRDN 24
+
+#define TPS65911_NUM_IRQ 25
+
+
+/* GPIO Register Definitions */
+#define TPS65910_GPIO_DEB BIT(4)
+#define TPS65910_GPIO_PUEN BIT(3)
+#define TPS65910_GPIO_CFG BIT(2)
+#define TPS65910_GPIO_STS BIT(1)
+#define TPS65910_GPIO_SET BIT(0)
+
+/**
+ * struct tps65910_board - board-specific platform data
+ *
+ * Board platform data may be used to initialize regulators.
+ */
+
+struct tps65910_board {
+ int gpio_base;
+ int irq;
+ int irq_base;
+ int vmbch_threshold;
+ int vmbch2_threshold;
+ struct regulator_init_data *tps65910_pmic_init_data;
+};
+
+/**
+ * struct tps65910 - tps65910 sub-driver chip access routines
+ */
+
+struct tps65910 {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct mutex io_mutex;
+ unsigned int id;
+ int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
+ int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
+
+ /* Client devices */
+ struct tps65910_pmic *pmic;
+ struct tps65910_rtc *rtc;
+ struct tps65910_power *power;
+
+ /* GPIO Handling */
+ struct gpio_chip gpio;
+
+ /* IRQ Handling */
+ struct mutex irq_lock;
+ int chip_irq;
+ int irq_base;
+ int irq_num;
+ u32 irq_mask;
+};
+
+struct tps65910_platform_data {
+ int irq;
+ int irq_base;
+};
+
+int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
+int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
+void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
+int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+ struct tps65910_platform_data *pdata);
+
+static inline int tps65910_chip_id(struct tps65910 *tps65910)
+{
+ return tps65910->id;
+}
+
+#endif /* __LINUX_MFD_TPS65910_H */
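For orientation, below is a minimal sketch of how a sub-driver could drive one of the
LDOs through the accessors and bitfield macros declared above. The function name and
error handling are illustrative assumptions, not part of this patch.

#include <linux/mfd/tps65910.h>

/* Hypothetical example: switch VDIG1 to its "on" state using the header's
 * accessors and masks. For illustration only. */
static int example_vdig1_on(struct tps65910 *tps65910)
{
	int ret;

	/* Clear both ST bits, then set the ON bit. */
	ret = tps65910_clear_bits(tps65910, TPS65910_VDIG1, LDO_ST_MASK);
	if (ret < 0)
		return ret;

	return tps65910_set_bits(tps65910, TPS65910_VDIG1, LDO_ST_ON_BIT);
}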
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 29312bdf119f..c928dac6cad0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1051,12 +1051,14 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
return __nr_to_section(pfn_to_section_nr(pfn));
}
+#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
+#endif
static inline int pfn_present(unsigned long pfn)
{
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9d5306bad117..2541fb848daa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -322,9 +322,12 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
/* Kernel-side ioctl definitions */
-extern int add_mtd_device(struct mtd_info *mtd);
-extern int del_mtd_device (struct mtd_info *mtd);
+struct mtd_partition;
+extern int mtd_device_register(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nr_parts);
+extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
@@ -348,15 +351,9 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
unsigned long count, loff_t from, size_t *retlen);
-#ifdef CONFIG_MTD_PARTITIONS
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
+
void mtd_erase_callback(struct erase_info *instr);
-#else
-static inline void mtd_erase_callback(struct erase_info *instr)
-{
- if (instr->callback)
- instr->callback(instr);
-}
-#endif
/*
* Debugging macro and defines
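With the partition-aware registration call, drivers no longer need separate
add_mtd_device()/add_mtd_partitions() paths. A probe routine would now do roughly
the following; the partition layout is invented for illustration:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Illustrative only: register a master MTD with two made-up partitions. */
static const struct mtd_partition example_parts[] = {
	{ .name = "boot",   .offset = 0,        .size = 0x40000 /* 256 KiB */ },
	{ .name = "rootfs", .offset = 0x40000,  .size = MTDPART_SIZ_FULL },
};

static int example_register(struct mtd_info *mtd)
{
	/* Passing NULL/0 would register the unpartitioned master only. */
	return mtd_device_register(mtd, example_parts, ARRAY_SIZE(example_parts));
}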
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index d44192740f6f..c2b9ac4fbc4a 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -237,9 +237,9 @@ typedef enum {
* If passed additionally to NAND_USE_FLASH_BBT then BBT code will not touch
* the OOB area.
*/
-#define NAND_USE_FLASH_BBT_NO_OOB 0x00100000
+#define NAND_USE_FLASH_BBT_NO_OOB 0x00800000
/* Create an empty BBT with no vendor information if the BBT is available */
-#define NAND_CREATE_EMPTY_BBT 0x00200000
+#define NAND_CREATE_EMPTY_BBT 0x01000000
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 4a0a8ba90a72..3a6f0372fc96 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -16,7 +16,7 @@
* Partition definition structure:
*
* An array of struct partition is passed along with a MTD object to
- * add_mtd_partitions() to create them.
+ * mtd_device_register() to create them.
*
* For each partition, these fields are available:
* name: string that will be used to label the partition's MTD device.
@@ -49,9 +49,6 @@ struct mtd_partition {
struct mtd_info;
-int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
-int del_mtd_partitions(struct mtd_info *);
-
/*
* Functions dealing with the various ways of partitioning the space
*/
@@ -73,14 +70,17 @@ extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
struct device;
struct device_node;
+#ifdef CONFIG_MTD_OF_PARTS
int __devinit of_mtd_parse_partitions(struct device *dev,
struct device_node *node,
struct mtd_partition **pparts);
-
-#ifdef CONFIG_MTD_PARTITIONS
-static inline int mtd_has_partitions(void) { return 1; }
#else
-static inline int mtd_has_partitions(void) { return 0; }
+static inline int of_mtd_parse_partitions(struct device *dev,
+ struct device_node *node,
+ struct mtd_partition **pparts)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_MTD_CMDLINE_PARTS
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index 49b959029417..d40bfa1d9c91 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -37,8 +37,6 @@ struct physmap_flash_data {
void physmap_configure(unsigned long addr, unsigned long size,
int bankwidth, void (*set_vpp)(struct map_info *, int) );
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* Machines that wish to do flash partition may want to call this function in
* their setup routine.
@@ -50,6 +48,4 @@ void physmap_configure(unsigned long addr, unsigned long size,
*/
void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
-#endif /* defined(CONFIG_MTD_PARTITIONS) */
-
#endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/net.h b/include/linux/net.h
index 1da55e9b6f01..b29923006b11 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -289,11 +289,5 @@ extern int kernel_sock_shutdown(struct socket *sock,
MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
"-type-" __stringify(type))
-#ifdef CONFIG_SYSCTL
-#include <linux/sysctl.h>
-#include <linux/ratelimit.h>
-extern struct ratelimit_state net_ratelimit_state;
-#endif
-
#endif /* __KERNEL__ */
#endif /* _LINUX_NET_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 7fa95df60146..857f5026ced6 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -13,6 +13,7 @@
#endif
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/sysctl.h>
/* Responses from hook functions. */
#define NF_DROP 0
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index a0196ac79051..ac3c822eb39a 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -839,7 +839,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
struct htable *t = h->table;
const struct type_pf_elem *d = value;
struct hbucket *n;
- int i, ret = 0;
+ int i;
struct type_pf_elem *data;
u32 key;
@@ -850,7 +850,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
if (!type_pf_data_equal(data, d))
continue;
if (type_pf_data_expired(data))
- ret = -IPSET_ERR_EXIST;
+ return -IPSET_ERR_EXIST;
if (i != n->pos - 1)
/* Not last one */
type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 9f30c5f2ec1c..bcdd40ad39ed 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -45,7 +45,7 @@ ip_set_timeout_test(unsigned long timeout)
{
return timeout != IPSET_ELEM_UNSET &&
(timeout == IPSET_ELEM_PERMANENT ||
- time_after(timeout, jiffies));
+ time_is_after_jiffies(timeout));
}
static inline bool
@@ -53,7 +53,7 @@ ip_set_timeout_expired(unsigned long timeout)
{
return timeout != IPSET_ELEM_UNSET &&
timeout != IPSET_ELEM_PERMANENT &&
- time_before(timeout, jiffies);
+ time_is_before_jiffies(timeout);
}
static inline unsigned long
@@ -64,7 +64,7 @@ ip_set_timeout_set(u32 timeout)
if (!timeout)
return IPSET_ELEM_PERMANENT;
- t = timeout * HZ + jiffies;
+ t = msecs_to_jiffies(timeout * 1000) + jiffies;
if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
/* Bingo! */
t++;
@@ -75,7 +75,8 @@ ip_set_timeout_set(u32 timeout)
static inline u32
ip_set_timeout_get(unsigned long timeout)
{
- return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+ return timeout == IPSET_ELEM_PERMANENT ? 0 :
+ jiffies_to_msecs(timeout - jiffies)/1000;
}
#else
@@ -89,14 +90,14 @@ static inline bool
ip_set_timeout_test(unsigned long timeout)
{
return timeout == IPSET_ELEM_PERMANENT ||
- time_after(timeout, jiffies);
+ time_is_after_jiffies(timeout);
}
static inline bool
ip_set_timeout_expired(unsigned long timeout)
{
return timeout != IPSET_ELEM_PERMANENT &&
- time_before(timeout, jiffies);
+ time_is_before_jiffies(timeout);
}
static inline unsigned long
@@ -107,7 +108,7 @@ ip_set_timeout_set(u32 timeout)
if (!timeout)
return IPSET_ELEM_PERMANENT;
- t = timeout * HZ + jiffies;
+ t = msecs_to_jiffies(timeout * 1000) + jiffies;
if (t == IPSET_ELEM_PERMANENT)
/* Bingo! :-) */
t++;
@@ -118,7 +119,8 @@ ip_set_timeout_set(u32 timeout)
static inline u32
ip_set_timeout_get(unsigned long timeout)
{
- return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+ return timeout == IPSET_ELEM_PERMANENT ? 0 :
+ jiffies_to_msecs(timeout - jiffies)/1000;
}
#endif /* ! IP_SET_BITMAP_TIMEOUT */
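The arithmetic change swaps open-coded HZ math for the standard jiffies/msec
conversion helpers, which handle rounding and clamping consistently. Used together,
the helpers round-trip a timeout given in seconds; a sketch, illustrative only:

#include <linux/netfilter/ipset/ip_set_timeout.h>

/* Sketch: store a 30 second element timeout, then query what is left. */
static bool example_timeout_roundtrip(void)
{
	unsigned long t = ip_set_timeout_set(30);	/* absolute jiffies */

	return ip_set_timeout_test(t) && ip_set_timeout_get(t) <= 30;
}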
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 24787b751286..a311008af5e1 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2483,6 +2483,7 @@
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
diff --git a/arch/arm/mach-s5p6442/include/mach/dma.h b/include/linux/power/isp1704_charger.h
index 81209eb1409b..68096a6aa2d7 100644
--- a/arch/arm/mach-s5p6442/include/mach/dma.h
+++ b/include/linux/power/isp1704_charger.h
@@ -1,6 +1,7 @@
/*
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
+ * ISP1704 USB Charger Detection driver
+ *
+ * Copyright (C) 2011 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,13 +15,15 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __MACH_DMA_H
-#define __MACH_DMA_H
-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+#ifndef __ISP1704_CHARGER_H
+#define __ISP1704_CHARGER_H
+
+struct isp1704_charger_data {
+ void (*set_power)(bool on);
+};
-#endif /* __MACH_DMA_H */
+#endif
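Boards hook set_power() so the driver can gate the transceiver supply around
detection. A hypothetical implementation; the GPIO number is an assumption:

#include <linux/gpio.h>
#include <linux/power/isp1704_charger.h>

#define EXAMPLE_ISP1704_POWER_GPIO	98	/* hypothetical board GPIO */

/* Illustrative: the board toggles the supply feeding the ISP1704. */
static void example_isp1704_set_power(bool on)
{
	gpio_set_value(EXAMPLE_ISP1704_POWER_GPIO, on);
}

static struct isp1704_charger_data example_isp1704_data = {
	.set_power = example_isp1704_set_power,
};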
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
new file mode 100644
index 000000000000..24f51db8a83f
--- /dev/null
+++ b/include/linux/power/max8903_charger.h
@@ -0,0 +1,57 @@
+/*
+ * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __MAX8903_CHARGER_H__
+#define __MAX8903_CHARGER_H__
+
+struct max8903_pdata {
+ /*
+ * GPIOs
+ * cen, chg, flt, and usus are optional.
+ * dok, dcm, and uok are not optional depending on the status of
+ * dc_valid and usb_valid.
+ */
+ int cen; /* Charger Enable input */
+ int dok; /* DC(Adapter) Power OK output */
+ int uok; /* USB Power OK output */
+ int chg; /* Charger status output */
+ int flt; /* Fault output */
+ int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
+ int usus; /* USB Suspend Input (1: suspended) */
+
+ /*
+ * DC(Adapter/TA) is wired
+ * When dc_valid is true,
+ * dok and dcm should be valid.
+ *
+ * At least one of dc_valid or usb_valid should be true.
+ */
+ bool dc_valid;
+ /*
+ * USB is wired
+ * When usb_valid is true,
+ * uok should be valid.
+ */
+ bool usb_valid;
+};
+
+#endif /* __MAX8903_CHARGER_H__ */
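A board that only wires the DC/adapter path would fill the platform data roughly as
below. The GPIO numbers are invented, and leaving unused optional pins negative
follows the usual gpio_is_valid() convention, which is an assumption here rather than
something this header mandates:

#include <linux/power/max8903_charger.h>

/* Illustrative board data: adapter charging only, no USB detection. */
static struct max8903_pdata example_max8903_pdata = {
	.dok       = 142,	/* hypothetical DC power-OK GPIO */
	.dcm       = 143,	/* hypothetical current-limit mode GPIO */
	.cen       = 144,	/* hypothetical charger-enable GPIO */
	.chg       = -1,	/* optional pins left unconnected */
	.flt       = -1,
	.uok       = -1,
	.usus      = -1,
	.dc_valid  = true,
	.usb_valid = false,
};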
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 03ff67b0cdf5..2f007157fab9 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -41,4 +41,44 @@ extern struct ratelimit_state printk_ratelimit_state;
extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
#define __ratelimit(state) ___ratelimit(state, __func__)
+#ifdef CONFIG_PRINTK
+
+#define WARN_ON_RATELIMIT(condition, state) \
+ WARN_ON((condition) && __ratelimit(state))
+
+#define __WARN_RATELIMIT(condition, state, format...) \
+({ \
+ int rtn = 0; \
+ if (unlikely(__ratelimit(state))) \
+ rtn = WARN(condition, format); \
+ rtn; \
+})
+
+#define WARN_RATELIMIT(condition, format...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ __WARN_RATELIMIT(condition, &_rs, format); \
+})
+
+#else
+
+#define WARN_ON_RATELIMIT(condition, state) \
+ WARN_ON(condition)
+
+#define __WARN_RATELIMIT(condition, state, format...) \
+({ \
+ int rtn = WARN(condition, format); \
+ rtn; \
+})
+
+#define WARN_RATELIMIT(condition, format...) \
+({ \
+ int rtn = WARN(condition, format); \
+ rtn; \
+})
+
+#endif
+
#endif /* _LINUX_RATELIMIT_H */
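The new macros let a caller rate-limit a warning without declaring its own ratelimit
state. A minimal sketch with an invented condition:

#include <linux/ratelimit.h>

/* Illustrative: complain about a suspiciously deep queue, but not on
 * every single occurrence. */
static void example_check_queue(int qlen)
{
	WARN_RATELIMIT(qlen > 1000, "queue unexpectedly deep: %d\n", qlen);
}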
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index c4c4fc45f856..ce3127a75c88 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -68,6 +68,8 @@ struct regulator_state {
*
* @min_uV: Smallest voltage consumers may set.
* @max_uV: Largest voltage consumers may set.
+ * @uV_offset: Offset applied to voltages from consumer to compensate for
+ * voltage drops.
*
* @min_uA: Smallest consumers consumers may set.
* @max_uA: Largest current consumers may set.
@@ -99,6 +101,8 @@ struct regulation_constraints {
int min_uV;
int max_uV;
+ int uV_offset;
+
/* current output range (inclusive) - for current control */
int min_uA;
int max_uA;
@@ -160,8 +164,6 @@ struct regulator_consumer_supply {
* @supply_regulator: Parent regulator. Specified using the regulator name
* as it appears in the name field in sysfs, which can
* be explicitly set using the constraints field 'name'.
- * @supply_regulator_dev: Parent regulator (if any) - DEPRECATED in favour
- * of supply_regulator.
*
* @constraints: Constraints. These must be specified for the regulator to
* be usable.
@@ -173,7 +175,6 @@ struct regulator_consumer_supply {
*/
struct regulator_init_data {
const char *supply_regulator; /* or NULL for system supply */
- struct device *supply_regulator_dev; /* or NULL for system supply */
struct regulation_constraints constraints;
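uV_offset gives boards a way to compensate for a known voltage drop between the
regulator output and the consumer. An illustrative constraints block; the values are
made up:

#include <linux/regulator/machine.h>

/* Illustrative: consumers request 1.8V, the board loses ~50mV en route,
 * so the regulator is programmed 50mV higher. */
static struct regulator_init_data example_ldo_init_data = {
	.constraints = {
		.min_uV         = 1800000,
		.max_uV         = 1800000,
		.uV_offset      = 50000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
	},
};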
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dc8871295a5a..bcddd0138105 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1546,7 +1546,7 @@ struct task_struct {
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
- /* bitmask of trace recursion */
+ /* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
@@ -1841,9 +1841,16 @@ static inline void rcu_copy_process(struct task_struct *p)
#endif
#ifdef CONFIG_SMP
+extern void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask);
+
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
#else
+static inline void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask)
{
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d512d98dfb7d..5ca0951e1855 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long probe_kernel_read(void *dst, void *src, size_t size);
-extern long __probe_kernel_read(void *dst, void *src, size_t size);
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, void *src, size_t size);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
#endif /* __LINUX_UACCESS_H__ */
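Constifying the source pointer mainly removes casts at call sites; the semantics are
unchanged, as in this sketch:

#include <linux/uaccess.h>

/* Illustrative: safely read a word from an address that may fault. */
static long example_peek_kernel(const void *addr, unsigned long *out)
{
	return probe_kernel_read(out, addr, sizeof(*out));
}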
diff --git a/include/media/m5mols.h b/include/media/m5mols.h
new file mode 100644
index 000000000000..2d7e7ca2313d
--- /dev/null
+++ b/include/media/m5mols.h
@@ -0,0 +1,35 @@
+/*
+ * Driver header for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef MEDIA_M5MOLS_H
+#define MEDIA_M5MOLS_H
+
+/**
+ * struct m5mols_platform_data - platform data for M-5MOLS driver
+ * @irq: GPIO getting the irq pin of M-5MOLS
+ * @gpio_reset: GPIO driving the reset pin of M-5MOLS
+ * @reset_polarity: active state for gpio_reset pin, 0 or 1
+ * @set_power: an additional callback to the board setup code
+ * to be called after enabling and before disabling
+ * the sensor's supply regulators
+ */
+struct m5mols_platform_data {
+ int irq;
+ int gpio_reset;
+ u8 reset_polarity;
+ int (*set_power)(struct device *dev, int on);
+};
+
+#endif /* MEDIA_M5MOLS_H */
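Board code passes this structure via the sensor's i2c_board_info.platform_data. A
rough sketch in which the GPIO numbers and the power hook body are assumptions:

#include <media/m5mols.h>

static int example_m5mols_set_power(struct device *dev, int on)
{
	/* Hypothetical: the board switches the sensor supplies here. */
	return 0;
}

static struct m5mols_platform_data example_m5mols_pdata = {
	.irq            = 186,	/* hypothetical interrupt GPIO/IRQ */
	.gpio_reset     = 57,	/* hypothetical reset GPIO */
	.reset_polarity = 0,	/* reset asserted low on this board */
	.set_power      = example_m5mols_set_power,
};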
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index 07cf4b9d0a65..bf365721d6b0 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -4,6 +4,9 @@
#include <dvb_net.h>
#include <dvb_frontend.h>
+#ifndef _VIDEOBUF_DVB_H_
+#define _VIDEOBUF_DVB_H_
+
struct videobuf_dvb {
/* filling that the job of the driver */
char *name;
@@ -54,6 +57,7 @@ void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f);
struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id);
int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
+#endif /* _VIDEOBUF_DVB_H_ */
/*
* Local variables:
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4fff432aeade..481f856c650f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -797,7 +797,8 @@ struct netns_ipvs {
struct list_head rs_table[IP_VS_RTAB_SIZE];
/* ip_vs_app */
struct list_head app_list;
-
+ /* ip_vs_ftp */
+ struct ip_vs_app *ftp_app;
/* ip_vs_proto */
#define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index dcc8f5749d3f..2bf9ed9ef26b 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -7,6 +7,7 @@
#include <asm/atomic.h>
#include <linux/workqueue.h>
#include <linux/list.h>
+#include <linux/sysctl.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
diff --git a/include/net/net_ratelimit.h b/include/net/net_ratelimit.h
new file mode 100644
index 000000000000..7727b4247daf
--- /dev/null
+++ b/include/net/net_ratelimit.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_NET_RATELIMIT_H
+#define _LINUX_NET_RATELIMIT_H
+
+#include <linux/ratelimit.h>
+
+extern struct ratelimit_state net_ratelimit_state;
+
+#endif /* _LINUX_NET_RATELIMIT_H */
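Users of net_ratelimit_state now pull in this small header instead of relying on the
sysctl block that was removed from linux/net.h above; typical use pairs it with
__ratelimit() from linux/ratelimit.h. A sketch with an invented message:

#include <net/net_ratelimit.h>

/* Illustrative: suppress a flood of identical warnings. */
static void example_complain(void)
{
	if (__ratelimit(&net_ratelimit_state))
		pr_warn("dropping malformed packet\n");
}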
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 8f6bb9c7f3eb..ee866060f8a4 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -604,6 +604,7 @@ struct sas_domain_function_template {
int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
+ int (*lldd_ata_soft_reset)(struct domain_device *);
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
int (*lldd_query_task)(struct sas_task *);
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index d6e7994aa634..81dd12edc38c 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -9,6 +9,7 @@
#define MSG_SIMPLE_TAG 0x20
#define MSG_HEAD_TAG 0x21
#define MSG_ORDERED_TAG 0x22
+#define MSG_ACA_TAG 0x24 /* unsupported */
#define SCSI_NO_TAG (-1) /* identify no tag in use */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1d3b5b2f0dbc..561ac99def5a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -98,6 +98,7 @@ enum transport_state_table {
TRANSPORT_REMOVE = 14,
TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
+ TRANSPORT_FREE_CMD_INTR = 17,
};
/* Used for struct se_cmd->se_cmd_flags */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index dc78f77f9450..747e1404dca0 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -77,7 +77,6 @@ struct target_core_fabric_ops {
u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
u16 (*get_fabric_sense_len)(void);
int (*is_state_remove)(struct se_cmd *);
- u64 (*pack_lun)(unsigned int);
/*
* fabric module calls for target_core_fabric_configfs.c
*/
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 59aa464f6ee2..24a1c6cb83c3 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -172,6 +172,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
+extern void transport_generic_free_cmd_intr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f445cff66ab7..4114129f0794 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -28,7 +28,7 @@ struct extent_buffer;
{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
#define __show_root_type(obj) \
- __print_symbolic(obj, \
+ __print_symbolic_u64(obj, \
{ BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
{ BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
{ BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
@@ -125,7 +125,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
);
#define __show_map_type(type) \
- __print_symbolic(type, \
+ __print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
{ EXTENT_MAP_INLINE, "INLINE" }, \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 3e68366d485a..533c49f48047 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -205,6 +205,19 @@
ftrace_print_symbols_seq(p, value, symbols); \
})
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_symbolic_u64(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 symbols[] = \
+ { symbol_array, { -1, NULL } }; \
+ ftrace_print_symbols_seq_u64(p, value, symbols); \
+ })
+#else
+#define __print_symbolic_u64(value, symbol_array...) \
+ __print_symbolic(value, symbol_array)
+#endif
+
#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1ceeb049c827..9c9b7545c810 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2190,7 +2190,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
rcu_read_lock();
cs = task_cs(tsk);
if (cs)
- cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+ do_set_cpus_allowed(tsk, cs->cpus_allowed);
rcu_read_unlock();
/*
@@ -2217,7 +2217,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
* Like above we can temporary set any mask and rely on
* set_cpus_allowed_ptr() as synchronization point.
*/
- cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+ do_set_cpus_allowed(tsk, cpu_possible_mask);
cpu = cpumask_any(cpu_active_mask);
}
diff --git a/kernel/cred.c b/kernel/cred.c
index e12c8af793f8..174fa84eca30 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -1,4 +1,4 @@
-/* Task credentials management - see Documentation/credentials.txt
+/* Task credentials management - see Documentation/security/credentials.txt
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c09767f7db3e..d863b3c057bb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5028,6 +5028,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
else
perf_event_output(event, nmi, data, regs);
+ if (event->fasync && event->pending_kill) {
+ if (nmi) {
+ event->pending_wakeup = 1;
+ irq_work_queue(&event->pending);
+ } else
+ perf_event_wakeup(event);
+ }
+
return ret;
}
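The added block means a task that requested asynchronous notification (fasync) on the
perf fd is now actually signalled when an overflow needs reporting. The userspace side
of that contract looks roughly like the sketch below; it is illustrative, not part of
the patch:

/* Illustrative userspace helper: ask for SIGIO on perf event overflow. */
#include <fcntl.h>
#include <unistd.h>

static void example_arm_sigio(int perf_fd)
{
	fcntl(perf_fd, F_SETOWN, getpid());
	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
	/* installing the SIGIO handler is left out for brevity */
}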
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 74d1c099fbd1..fa27e750dbc0 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -105,9 +105,12 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
}
static void __jump_label_update(struct jump_label_key *key,
- struct jump_entry *entry, int enable)
+ struct jump_entry *entry,
+ struct jump_entry *stop, int enable)
{
- for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+ for (; (entry < stop) &&
+ (entry->key == (jump_label_t)(unsigned long)key);
+ entry++) {
/*
* entry->code set to 0 invalidates module init text sections
* kernel_text_address() verifies we are not in core kernel
@@ -181,7 +184,11 @@ static void __jump_label_mod_update(struct jump_label_key *key, int enable)
struct jump_label_mod *mod = key->next;
while (mod) {
- __jump_label_update(key, mod->entries, enable);
+ struct module *m = mod->mod;
+
+ __jump_label_update(key, mod->entries,
+ m->jump_entries + m->num_jump_entries,
+ enable);
mod = mod->next;
}
}
@@ -245,7 +252,8 @@ static int jump_label_add_module(struct module *mod)
key->next = jlm;
if (jump_label_enabled(key))
- __jump_label_update(key, iter, JUMP_LABEL_ENABLE);
+ __jump_label_update(key, iter, iter_stop,
+ JUMP_LABEL_ENABLE);
}
return 0;
@@ -371,7 +379,7 @@ static void jump_label_update(struct jump_label_key *key, int enable)
/* if there are no users, entry can be NULL */
if (entry)
- __jump_label_update(key, entry, enable);
+ __jump_label_update(key, entry, __stop___jump_table, enable);
#ifdef CONFIG_MODULES
__jump_label_mod_update(key, enable);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3b34d2732bce..4ba7cccb4994 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,8 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
return;
}
- p->cpus_allowed = cpumask_of_cpu(cpu);
- p->rt.nr_cpus_allowed = 1;
+ /* It's safe because the task is inactive. */
+ do_set_cpus_allowed(p, cpumask_of(cpu));
p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index beb184689af9..fd8d1e035df9 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -40,6 +40,7 @@
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/uaccess.h>
@@ -404,24 +405,36 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
- int x;
- char ascii_value[11];
struct pm_qos_request_list *pm_qos_req;
if (count == sizeof(s32)) {
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
- } else if (count == 11) { /* len('0x12345678/0') */
- if (copy_from_user(ascii_value, buf, 11))
+ } else if (count <= 11) { /* ASCII perhaps? */
+ char ascii_value[11];
+ unsigned long int ulval;
+ int ret;
+
+ if (copy_from_user(ascii_value, buf, count))
return -EFAULT;
- if (strlen(ascii_value) != 10)
- return -EINVAL;
- x = sscanf(ascii_value, "%x", &value);
- if (x != 1)
+
+ if (count > 10) {
+ if (ascii_value[10] == '\n')
+ ascii_value[10] = '\0';
+ else
+ return -EINVAL;
+ } else {
+ ascii_value[count] = '\0';
+ }
+ ret = strict_strtoul(ascii_value, 16, &ulval);
+ if (ret) {
+ pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
return -EINVAL;
- pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
- } else
+ }
+ value = (s32)lower_32_bits(ulval);
+ } else {
return -EINVAL;
+ }
pm_qos_req = filp->private_data;
pm_qos_update_request(pm_qos_req, value);
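With this change the misc-device write path accepts a short ASCII hex string, with or
without a trailing newline, in addition to a raw binary s32. From userspace, holding a
20us cpu_dma_latency request could look like the sketch below; the device node is the
one pm_qos_params.c registers, the rest is illustrative:

/* Illustrative userspace snippet: request a 20us PM QoS latency bound. */
#include <fcntl.h>
#include <unistd.h>

static int example_hold_latency(void)
{
	int fd = open("/dev/cpu_dma_latency", O_RDWR);

	if (fd < 0)
		return -1;
	if (write(fd, "0x14\n", 5) != 5) {	/* 0x14 == 20 microseconds */
		close(fd);
		return -1;
	}
	return fd;	/* the constraint holds while the fd stays open */
}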
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index f9bec56d8825..8f7b1db1ece1 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -25,7 +25,6 @@
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <scsi/scsi_scan.h>
-#include <asm/suspend.h>
#include "power.h"
@@ -55,10 +54,9 @@ static int hibernation_mode = HIBERNATION_SHUTDOWN;
static const struct platform_hibernation_ops *hibernation_ops;
/**
- * hibernation_set_ops - set the global hibernate operations
- * @ops: the hibernation operations to use in subsequent hibernation transitions
+ * hibernation_set_ops - Set the global hibernate operations.
+ * @ops: Hibernation operations to use in subsequent hibernation transitions.
*/
-
void hibernation_set_ops(const struct platform_hibernation_ops *ops)
{
if (ops && !(ops->begin && ops->end && ops->pre_snapshot
@@ -115,10 +113,9 @@ static int hibernation_test(int level) { return 0; }
#endif /* !CONFIG_PM_DEBUG */
/**
- * platform_begin - tell the platform driver that we're starting
- * hibernation
+ * platform_begin - Call platform to start hibernation.
+ * @platform_mode: Whether or not to use the platform driver.
*/
-
static int platform_begin(int platform_mode)
{
return (platform_mode && hibernation_ops) ?
@@ -126,10 +123,9 @@ static int platform_begin(int platform_mode)
}
/**
- * platform_end - tell the platform driver that we've entered the
- * working state
+ * platform_end - Call platform to finish transition to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
*/
-
static void platform_end(int platform_mode)
{
if (platform_mode && hibernation_ops)
@@ -137,8 +133,11 @@ static void platform_end(int platform_mode)
}
/**
- * platform_pre_snapshot - prepare the machine for hibernation using the
- * platform driver if so configured and return an error code if it fails
+ * platform_pre_snapshot - Call platform to prepare the machine for hibernation.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the system for creating a hibernate image,
+ * if so configured, and return an error code if that fails.
*/
static int platform_pre_snapshot(int platform_mode)
@@ -148,10 +147,14 @@ static int platform_pre_snapshot(int platform_mode)
}
/**
- * platform_leave - prepare the machine for switching to the normal mode
- * of operation using the platform driver (called with interrupts disabled)
+ * platform_leave - Call platform to prepare a transition to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the machine for switching to the
+ * normal mode of operation.
+ *
+ * This routine is called on one CPU with interrupts disabled.
*/
-
static void platform_leave(int platform_mode)
{
if (platform_mode && hibernation_ops)
@@ -159,10 +162,14 @@ static void platform_leave(int platform_mode)
}
/**
- * platform_finish - switch the machine to the normal mode of operation
- * using the platform driver (must be called after platform_prepare())
+ * platform_finish - Call platform to switch the system to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to switch the machine to the normal mode of
+ * operation.
+ *
+ * This routine must be called after platform_prepare().
*/
-
static void platform_finish(int platform_mode)
{
if (platform_mode && hibernation_ops)
@@ -170,11 +177,15 @@ static void platform_finish(int platform_mode)
}
/**
- * platform_pre_restore - prepare the platform for the restoration from a
- * hibernation image. If the restore fails after this function has been
- * called, platform_restore_cleanup() must be called.
+ * platform_pre_restore - Prepare for hibernate image restoration.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the system for resume from a hibernation
+ * image.
+ *
+ * If the restore fails after this function has been called,
+ * platform_restore_cleanup() must be called.
*/
-
static int platform_pre_restore(int platform_mode)
{
return (platform_mode && hibernation_ops) ?
@@ -182,12 +193,16 @@ static int platform_pre_restore(int platform_mode)
}
/**
- * platform_restore_cleanup - switch the platform to the normal mode of
- * operation after a failing restore. If platform_pre_restore() has been
- * called before the failing restore, this function must be called too,
- * regardless of the result of platform_pre_restore().
+ * platform_restore_cleanup - Switch to the working state after failing restore.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to switch the system to the normal mode of operation
+ * after a failing restore.
+ *
+ * If platform_pre_restore() has been called before the failing restore, this
+ * function must be called too, regardless of the result of
+ * platform_pre_restore().
*/
-
static void platform_restore_cleanup(int platform_mode)
{
if (platform_mode && hibernation_ops)
@@ -195,10 +210,9 @@ static void platform_restore_cleanup(int platform_mode)
}
/**
- * platform_recover - recover the platform from a failure to suspend
- * devices.
+ * platform_recover - Recover from a failure to suspend devices.
+ * @platform_mode: Whether or not to use the platform driver.
*/
-
static void platform_recover(int platform_mode)
{
if (platform_mode && hibernation_ops && hibernation_ops->recover)
@@ -206,13 +220,12 @@ static void platform_recover(int platform_mode)
}
/**
- * swsusp_show_speed - print the time elapsed between two events.
- * @start: Starting event.
- * @stop: Final event.
- * @nr_pages - number of pages processed between @start and @stop
- * @msg - introductory message to print
+ * swsusp_show_speed - Print time elapsed between two events during hibernation.
+ * @start: Starting event.
+ * @stop: Final event.
+ * @nr_pages: Number of memory pages processed between @start and @stop.
+ * @msg: Additional diagnostic message to print.
*/
-
void swsusp_show_speed(struct timeval *start, struct timeval *stop,
unsigned nr_pages, char *msg)
{
@@ -235,25 +248,18 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
}
/**
- * create_image - freeze devices that need to be frozen with interrupts
- * off, create the hibernation image and thaw those devices. Control
- * reappears in this routine after a restore.
+ * create_image - Create a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image
+ * and execute the drivers' .thaw_noirq() callbacks.
+ *
+ * Control reappears in this routine after the subsequent restore.
*/
-
static int create_image(int platform_mode)
{
int error;
- error = arch_prepare_suspend();
- if (error)
- return error;
-
- /* At this point, dpm_suspend_start() has been called, but *not*
- * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
- * Otherwise, drivers for some devices (e.g. interrupt controllers)
- * become desynchronized with the actual state of the hardware
- * at resume time, and evil weirdness ensues.
- */
error = dpm_suspend_noirq(PMSG_FREEZE);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
@@ -297,9 +303,6 @@ static int create_image(int platform_mode)
Power_up:
syscore_resume();
- /* NOTE: dpm_resume_noirq() is just a resume() for devices
- * that suspended with irqs off ... no overall powerup.
- */
Enable_irqs:
local_irq_enable();
@@ -317,14 +320,11 @@ static int create_image(int platform_mode)
}
/**
- * hibernation_snapshot - quiesce devices and create the hibernation
- * snapshot image.
- * @platform_mode - if set, use the platform driver, if available, to
- * prepare the platform firmware for the power transition.
+ * hibernation_snapshot - Quiesce devices and create a hibernation image.
+ * @platform_mode: If set, use platform driver to prepare for the transition.
*
- * Must be called with pm_mutex held
+ * This routine must be called with pm_mutex held.
*/
-
int hibernation_snapshot(int platform_mode)
{
pm_message_t msg = PMSG_RECOVER;
@@ -384,13 +384,14 @@ int hibernation_snapshot(int platform_mode)
}
/**
- * resume_target_kernel - prepare devices that need to be suspended with
- * interrupts off, restore the contents of highmem that have not been
- * restored yet from the image and run the low level code that will restore
- * the remaining contents of memory and switch to the just restored target
- * kernel.
+ * resume_target_kernel - Restore system state from a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Execute device drivers' .freeze_noirq() callbacks, restore the contents of
+ * highmem that have not been restored yet from the image and run the low-level
+ * code that will restore the remaining contents of memory and switch to the
+ * just restored target kernel.
*/
-
static int resume_target_kernel(bool platform_mode)
{
int error;
@@ -416,24 +417,26 @@ static int resume_target_kernel(bool platform_mode)
if (error)
goto Enable_irqs;
- /* We'll ignore saved state, but this gets preempt count (etc) right */
save_processor_state();
error = restore_highmem();
if (!error) {
error = swsusp_arch_resume();
/*
* The code below is only ever reached in case of a failure.
- * Otherwise execution continues at place where
- * swsusp_arch_suspend() was called
+ * Otherwise, execution continues at the place where
+ * swsusp_arch_suspend() was called.
*/
BUG_ON(!error);
- /* This call to restore_highmem() undos the previous one */
+ /*
+ * This call to restore_highmem() reverts the changes made by
+ * the previous one.
+ */
restore_highmem();
}
/*
* The only reason why swsusp_arch_resume() can fail is memory being
* very tight, so we have to free it as soon as we can to avoid
- * subsequent failures
+ * subsequent failures.
*/
swsusp_free();
restore_processor_state();
@@ -456,14 +459,12 @@ static int resume_target_kernel(bool platform_mode)
}
/**
- * hibernation_restore - quiesce devices and restore the hibernation
- * snapshot image. If successful, control returns in hibernation_snaphot()
- * @platform_mode - if set, use the platform driver, if available, to
- * prepare the platform firmware for the transition.
+ * hibernation_restore - Quiesce devices and restore from a hibernation image.
+ * @platform_mode: If set, use platform driver to prepare for the transition.
*
- * Must be called with pm_mutex held
+ * This routine must be called with pm_mutex held. If it is successful, control
+ * reappears in the restored target kernel in hibernation_snapshot().
*/
-
int hibernation_restore(int platform_mode)
{
int error;
@@ -483,10 +484,8 @@ int hibernation_restore(int platform_mode)
}
/**
- * hibernation_platform_enter - enter the hibernation state using the
- * platform driver (if available)
+ * hibernation_platform_enter - Power off the system using the platform driver.
*/
-
int hibernation_platform_enter(void)
{
int error;
@@ -557,12 +556,12 @@ int hibernation_platform_enter(void)
}
/**
- * power_down - Shut the machine down for hibernation.
+ * power_down - Shut the machine down for hibernation.
*
- * Use the platform driver, if configured so; otherwise try
- * to power off or reboot.
+ * Use the platform driver, if configured, to put the system into the sleep
+ * state corresponding to hibernation, or try to power it off or reboot,
+ * depending on the value of hibernation_mode.
*/
-
static void power_down(void)
{
switch (hibernation_mode) {
@@ -599,9 +598,8 @@ static int prepare_processes(void)
}
/**
- * hibernate - The granpappy of the built-in hibernation management
+ * hibernate - Carry out system hibernation, including saving the image.
*/
-
int hibernate(void)
{
int error;
@@ -679,17 +677,20 @@ int hibernate(void)
/**
- * software_resume - Resume from a saved image.
+ * software_resume - Resume from a saved hibernation image.
*
- * Called as a late_initcall (so all devices are discovered and
- * initialized), we call swsusp to see if we have a saved image or not.
- * If so, we quiesce devices, the restore the saved image. We will
- * return above (in hibernate() ) if everything goes well.
- * Otherwise, we fail gracefully and return to the normally
- * scheduled program.
+ * This routine is called as a late initcall, when all devices have been
+ * discovered and initialized already.
*
+ * The image reading code is called to see if there is a hibernation image
+ * available for reading. If that is the case, devices are quiesced and the
+ * contents of memory are restored from the saved image.
+ *
+ * If this is successful, control reappears in the restored target kernel in
+ * hibernation_snapshot(), which returns to hibernate(). Otherwise, the routine
+ * attempts to recover gracefully and make the kernel return to the normal mode
+ * of operation.
*/
-
static int software_resume(void)
{
int error;
@@ -819,21 +820,17 @@ static const char * const hibernation_modes[] = {
[HIBERNATION_TESTPROC] = "testproc",
};
-/**
- * disk - Control hibernation mode
- *
- * Suspend-to-disk can be handled in several ways. We have a few options
- * for putting the system to sleep - using the platform driver (e.g. ACPI
- * or other hibernation_ops), powering off the system or rebooting the
- * system (for testing) as well as the two test modes.
+/*
+ * /sys/power/disk - Control hibernation mode.
*
- * The system can support 'platform', and that is known a priori (and
- * encoded by the presence of hibernation_ops). However, the user may
- * choose 'shutdown' or 'reboot' as alternatives, as well as one fo the
- * test modes, 'test' or 'testproc'.
+ * Hibernation can be handled in several ways. There are a few different ways
+ * to put the system into the sleep state: using the platform driver (e.g. ACPI
+ * or other hibernation_ops), powering it off or rebooting it (for testing
+ * mostly), or using one of the two available test modes.
*
- * show() will display what the mode is currently set to.
- * store() will accept one of
+ * The sysfs file /sys/power/disk provides an interface for selecting the
+ * hibernation mode to use. Reading from this file causes the available modes
+ * to be printed. There are 5 modes that can be supported:
*
* 'platform'
* 'shutdown'
@@ -841,8 +838,14 @@ static const char * const hibernation_modes[] = {
* 'test'
* 'testproc'
*
- * It will only change to 'platform' if the system
- * supports it (as determined by having hibernation_ops).
+ * If a platform hibernation driver is in use, 'platform' will be supported
+ * and will be used by default. Otherwise, 'shutdown' will be used by default.
+ * The selected option (i.e. the one corresponding to the current value of
+ * hibernation_mode) is enclosed in square brackets.
+ *
+ * To select a given hibernation mode it is necessary to write the mode's
+ * string representation (as returned by reading from /sys/power/disk) back
+ * into /sys/power/disk.
*/
static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -875,7 +878,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
return buf-start;
}
-
static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
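Editor's note: the /sys/power/disk comment rewritten above describes a read/write sysfs interface for selecting the hibernation mode. As an illustration only (not part of the patch), a minimal userspace sketch that prints the available modes and then selects 'shutdown' might look like this, assuming the file exists and the process has the required privileges:

#include <stdio.h>

int main(void)
{
	char modes[128];
	FILE *f = fopen("/sys/power/disk", "r");

	if (!f || !fgets(modes, sizeof(modes), f)) {
		perror("/sys/power/disk");
		return 1;
	}
	fclose(f);
	printf("available modes: %s", modes); /* current one is in [brackets] */

	/* Select the 'shutdown' mode by writing its name back. */
	f = fopen("/sys/power/disk", "w");
	if (!f || fputs("shutdown\n", f) == EOF) {
		perror("write /sys/power/disk");
		return 1;
	}
	fclose(f);
	return 0;
}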
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f07d2f03181a..77a7671dd147 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -36,7 +36,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
@@ -95,7 +95,6 @@ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
static char rcu_kthreads_spawnable;
@@ -163,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = 1,
- .dynticks = 1,
+ .dynticks = ATOMIC_INIT(1),
};
#endif /* #ifdef CONFIG_NO_HZ */
@@ -322,13 +321,25 @@ void rcu_enter_nohz(void)
unsigned long flags;
struct rcu_dynticks *rdtp;
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- rdtp->dynticks++;
- rdtp->dynticks_nesting--;
- WARN_ON_ONCE(rdtp->dynticks & 0x1);
+ if (--rdtp->dynticks_nesting) {
+ local_irq_restore(flags);
+ return;
+ }
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+ atomic_inc(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
local_irq_restore(flags);
+
+ /* If the interrupt queued a callback, get out of dyntick mode. */
+ if (in_irq() &&
+ (__get_cpu_var(rcu_sched_data).nxtlist ||
+ __get_cpu_var(rcu_bh_data).nxtlist ||
+ rcu_preempt_needs_cpu(smp_processor_id())))
+ set_need_resched();
}
/*
@@ -344,11 +355,16 @@ void rcu_exit_nohz(void)
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- rdtp->dynticks++;
- rdtp->dynticks_nesting++;
- WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+ if (rdtp->dynticks_nesting++) {
+ local_irq_restore(flags);
+ return;
+ }
+ smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+ atomic_inc(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
local_irq_restore(flags);
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
/**
@@ -362,11 +378,15 @@ void rcu_nmi_enter(void)
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks & 0x1)
+ if (rdtp->dynticks_nmi_nesting == 0 &&
+ (atomic_read(&rdtp->dynticks) & 0x1))
return;
- rdtp->dynticks_nmi++;
- WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+ rdtp->dynticks_nmi_nesting++;
+ smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+ atomic_inc(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}
/**
@@ -380,11 +400,14 @@ void rcu_nmi_exit(void)
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks & 0x1)
+ if (rdtp->dynticks_nmi_nesting == 0 ||
+ --rdtp->dynticks_nmi_nesting != 0)
return;
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
- rdtp->dynticks_nmi++;
- WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+ atomic_inc(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force delay to next write. */
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
/**
@@ -395,13 +418,7 @@ void rcu_nmi_exit(void)
*/
void rcu_irq_enter(void)
{
- struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
- if (rdtp->dynticks_nesting++)
- return;
- rdtp->dynticks++;
- WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+ rcu_exit_nohz();
}
/**
@@ -413,18 +430,7 @@ void rcu_irq_enter(void)
*/
void rcu_irq_exit(void)
{
- struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
- if (--rdtp->dynticks_nesting)
- return;
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
- rdtp->dynticks++;
- WARN_ON_ONCE(rdtp->dynticks & 0x1);
-
- /* If the interrupt queued a callback, get out of dyntick mode. */
- if (__this_cpu_read(rcu_sched_data.nxtlist) ||
- __this_cpu_read(rcu_bh_data.nxtlist))
- set_need_resched();
+ rcu_enter_nohz();
}
#ifdef CONFIG_SMP
@@ -436,19 +442,8 @@ void rcu_irq_exit(void)
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
- int ret;
- int snap;
- int snap_nmi;
-
- snap = rdp->dynticks->dynticks;
- snap_nmi = rdp->dynticks->dynticks_nmi;
- smp_mb(); /* Order sampling of snap with end of grace period. */
- rdp->dynticks_snap = snap;
- rdp->dynticks_nmi_snap = snap_nmi;
- ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
- if (ret)
- rdp->dynticks_fqs++;
- return ret;
+ rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+ return 0;
}
/*
@@ -459,16 +454,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
*/
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
- long curr;
- long curr_nmi;
- long snap;
- long snap_nmi;
+ unsigned long curr;
+ unsigned long snap;
- curr = rdp->dynticks->dynticks;
- snap = rdp->dynticks_snap;
- curr_nmi = rdp->dynticks->dynticks_nmi;
- snap_nmi = rdp->dynticks_nmi_snap;
- smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+ curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
+ snap = (unsigned long)rdp->dynticks_snap;
/*
* If the CPU passed through or entered a dynticks idle phase with
@@ -478,8 +468,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* read-side critical section that started before the beginning
* of the current RCU grace period.
*/
- if ((curr != snap || (curr & 0x1) == 0) &&
- (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+ if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
rdp->dynticks_fqs++;
return 1;
}
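Editor's note: the check above relies on the new convention that rdtp->dynticks is even while the CPU is in dynticks-idle and odd otherwise, so a counter that is even now, or that has advanced by at least two since the snapshot, proves the CPU passed through a quiescent state. A standalone restatement of that test, for illustration only (names invented for the example, with a simplified wraparound-safe comparison in place of ULONG_CMP_GE):

#include <assert.h>

/* Even value: CPU is in dynticks idle; odd value: CPU is active. */
static int passed_quiescent_state(unsigned long curr, unsigned long snap)
{
	/* Idle right now, or went through idle (counter advanced by >= 2). */
	return (curr & 0x1) == 0 || (long)(curr - snap) >= 2;
}

int main(void)
{
	assert(passed_quiescent_state(4, 3));  /* currently idle */
	assert(passed_quiescent_state(5, 3));  /* passed through idle */
	assert(!passed_quiescent_state(3, 3)); /* still active, no progress */
	return 0;
}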
@@ -908,6 +897,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
unsigned long gp_duration;
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+
+ /*
+ * Ensure that all grace-period and pre-grace-period activity
+ * is seen before the assignment to rsp->completed.
+ */
+ smp_mb(); /* See above block comment. */
gp_duration = jiffies - rsp->gp_start;
if (gp_duration > rsp->gp_max)
rsp->gp_max = gp_duration;
@@ -1455,25 +1450,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static void rcu_process_callbacks(void)
{
- /*
- * Memory references from any prior RCU read-side critical sections
- * executed by the interrupted code must be seen before any RCU
- * grace-period manipulations below.
- */
- smp_mb(); /* See above block comment. */
-
__rcu_process_callbacks(&rcu_sched_state,
&__get_cpu_var(rcu_sched_data));
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
rcu_preempt_process_callbacks();
- /*
- * Memory references from any later RCU read-side critical sections
- * executed by the interrupted code must be seen after any RCU
- * grace-period manipulations above.
- */
- smp_mb(); /* See above block comment. */
-
/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
rcu_needs_cpu_flush();
}
@@ -1494,7 +1475,7 @@ static void invoke_rcu_cpu_kthread(void)
local_irq_restore(flags);
return;
}
- wake_up(&__get_cpu_var(rcu_cpu_wq));
+ wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
local_irq_restore(flags);
}
@@ -1544,13 +1525,10 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
*/
static void rcu_cpu_kthread_timer(unsigned long arg)
{
- unsigned long flags;
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
struct rcu_node *rnp = rdp->mynode;
- raw_spin_lock_irqsave(&rnp->lock, flags);
- rnp->wakemask |= rdp->grpmask;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ atomic_or(rdp->grpmask, &rnp->wakemask);
invoke_rcu_node_kthread(rnp);
}
@@ -1617,14 +1595,12 @@ static int rcu_cpu_kthread(void *arg)
unsigned long flags;
int spincnt = 0;
unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
- wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
char work;
char *workp = &per_cpu(rcu_cpu_has_work, cpu);
for (;;) {
*statusp = RCU_KTHREAD_WAITING;
- wait_event_interruptible(*wqp,
- *workp != 0 || kthread_should_stop());
+ rcu_wait(*workp != 0 || kthread_should_stop());
local_bh_disable();
if (rcu_cpu_kthread_should_stop(cpu)) {
local_bh_enable();
@@ -1672,10 +1648,10 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
if (IS_ERR(t))
return PTR_ERR(t);
kthread_bind(t, cpu);
+ set_task_state(t, TASK_INTERRUPTIBLE);
per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
per_cpu(rcu_cpu_kthread_task, cpu) = t;
- wake_up_process(t);
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
return 0;
@@ -1698,11 +1674,10 @@ static int rcu_node_kthread(void *arg)
for (;;) {
rnp->node_kthread_status = RCU_KTHREAD_WAITING;
- wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
+ rcu_wait(atomic_read(&rnp->wakemask) != 0);
rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
raw_spin_lock_irqsave(&rnp->lock, flags);
- mask = rnp->wakemask;
- rnp->wakemask = 0;
+ mask = atomic_xchg(&rnp->wakemask, 0);
rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
if ((mask & 0x1) == 0)
@@ -1781,9 +1756,9 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
if (IS_ERR(t))
return PTR_ERR(t);
raw_spin_lock_irqsave(&rnp->lock, flags);
+ set_task_state(t, TASK_INTERRUPTIBLE);
rnp->node_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- wake_up_process(t);
sp.sched_priority = 99;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
}
@@ -1800,21 +1775,16 @@ static int __init rcu_spawn_kthreads(void)
rcu_kthreads_spawnable = 1;
for_each_possible_cpu(cpu) {
- init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
per_cpu(rcu_cpu_has_work, cpu) = 0;
if (cpu_online(cpu))
(void)rcu_spawn_one_cpu_kthread(cpu);
}
rnp = rcu_get_root(rcu_state);
- init_waitqueue_head(&rnp->node_wq);
- rcu_init_boost_waitqueue(rnp);
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
- if (NUM_RCU_NODES > 1)
- rcu_for_each_leaf_node(rcu_state, rnp) {
- init_waitqueue_head(&rnp->node_wq);
- rcu_init_boost_waitqueue(rnp);
+ if (NUM_RCU_NODES > 1) {
+ rcu_for_each_leaf_node(rcu_state, rnp)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
- }
+ }
return 0;
}
early_initcall(rcu_spawn_kthreads);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 257664815d5d..7b9a08b4aaea 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,11 +84,9 @@
* Dynticks per-CPU state.
*/
struct rcu_dynticks {
- int dynticks_nesting; /* Track nesting level, sort of. */
- int dynticks; /* Even value for dynticks-idle, else odd. */
- int dynticks_nmi; /* Even value for either dynticks-idle or */
- /* not in nmi handler, else odd. So this */
- /* remains even for nmi from irq handler. */
+ int dynticks_nesting; /* Track irq/process nesting level. */
+ int dynticks_nmi_nesting; /* Track NMI nesting level. */
+ atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
};
/* RCU's kthread states for tracing. */
@@ -121,7 +119,9 @@ struct rcu_node {
/* elements that need to drain to allow the */
/* current expedited grace period to */
/* complete (only for TREE_PREEMPT_RCU). */
- unsigned long wakemask; /* CPUs whose kthread needs to be awakened. */
+ atomic_t wakemask; /* CPUs whose kthread needs to be awakened. */
+ /* Since this has meaning only for leaf */
+ /* rcu_node structures, 32 bits suffices. */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask & expmask. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
@@ -159,9 +159,6 @@ struct rcu_node {
struct task_struct *boost_kthread_task;
/* kthread that takes care of priority */
/* boosting for this rcu_node structure. */
- wait_queue_head_t boost_wq;
- /* Wait queue on which to park the boost */
- /* kthread. */
unsigned int boost_kthread_status;
/* State of boost_kthread_task for tracing. */
unsigned long n_tasks_boosted;
@@ -188,9 +185,6 @@ struct rcu_node {
/* kthread that takes care of this rcu_node */
/* structure, for example, awakening the */
/* per-CPU kthreads as needed. */
- wait_queue_head_t node_wq;
- /* Wait queue on which to park the per-node */
- /* kthread. */
unsigned int node_kthread_status;
/* State of node_kthread_task for tracing. */
} ____cacheline_internodealigned_in_smp;
@@ -284,7 +278,6 @@ struct rcu_data {
/* 3) dynticks interface. */
struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
int dynticks_snap; /* Per-GP tracking for dynticks. */
- int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
#endif /* #ifdef CONFIG_NO_HZ */
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
@@ -337,6 +330,16 @@ struct rcu_data {
/* scheduling clock irq */
/* before ratting on them. */
+#define rcu_wait(cond) \
+do { \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (cond) \
+ break; \
+ schedule(); \
+ } \
+ __set_current_state(TASK_RUNNING); \
+} while (0)
/*
* RCU global state, including node hierarchy. This hierarchy is
@@ -446,7 +449,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
cpumask_var_t cm);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3f6559a5f5cd..a767b7dac365 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1196,8 +1196,7 @@ static int rcu_boost_kthread(void *arg)
for (;;) {
rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
- wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
- rnp->exp_tasks);
+ rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
more2boost = rcu_boost(rnp);
if (more2boost)
@@ -1275,14 +1274,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
}
/*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
- init_waitqueue_head(&rnp->boost_wq);
-}
-
-/*
* Create an RCU-boost kthread for the specified node if one does not
* already exist. We only create this kthread for preemptible RCU.
* Returns zero if all is well, a negated errno otherwise.
@@ -1304,9 +1295,9 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
if (IS_ERR(t))
return PTR_ERR(t);
raw_spin_lock_irqsave(&rnp->lock, flags);
+ set_task_state(t, TASK_INTERRUPTIBLE);
rnp->boost_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- wake_up_process(t);
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
return 0;
@@ -1328,10 +1319,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-}
-
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp,
int rnp_index)
@@ -1520,7 +1507,6 @@ int rcu_needs_cpu(int cpu)
{
int c = 0;
int snap;
- int snap_nmi;
int thatcpu;
/* Check for being in the holdoff period. */
@@ -1531,10 +1517,10 @@ int rcu_needs_cpu(int cpu)
for_each_online_cpu(thatcpu) {
if (thatcpu == cpu)
continue;
- snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
- snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+ snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
+ thatcpu).dynticks);
smp_mb(); /* Order sampling of snap with end of grace period. */
- if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
+ if ((snap & 0x1) != 0) {
per_cpu(rcu_dyntick_drain, cpu) = 0;
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
return rcu_needs_cpu_quick_check(cpu);
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index aa0fd72b4bc7..9678cc3650f5 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -69,10 +69,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->passed_quiesc, rdp->passed_quiesc_completed,
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
- seq_printf(m, " dt=%d/%d dn=%d df=%lu",
- rdp->dynticks->dynticks,
+ seq_printf(m, " dt=%d/%d/%d df=%lu",
+ atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
- rdp->dynticks->dynticks_nmi,
+ rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -141,9 +141,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, ",%d,%d,%d,%lu",
- rdp->dynticks->dynticks,
+ atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
- rdp->dynticks->dynticks_nmi,
+ rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -167,7 +167,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
{
seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
#ifdef CONFIG_NO_HZ
- seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+ seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
#ifdef CONFIG_TREE_PREEMPT_RCU
diff --git a/kernel/sched.c b/kernel/sched.c
index 5e43e9dc65d1..cbb3a0eee58e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2573,7 +2573,26 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
if (!next)
smp_send_reschedule(cpu);
}
-#endif
+
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
+{
+ struct rq *rq;
+ int ret = 0;
+
+ rq = __task_rq_lock(p);
+ if (p->on_cpu) {
+ ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+ ttwu_do_wakeup(rq, p, wake_flags);
+ ret = 1;
+ }
+ __task_rq_unlock(rq);
+
+ return ret;
+
+}
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+#endif /* CONFIG_SMP */
static void ttwu_queue(struct task_struct *p, int cpu)
{
@@ -2631,17 +2650,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
while (p->on_cpu) {
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
- * If called from interrupt context we could have landed in the
- * middle of schedule(), in this case we should take care not
- * to spin on ->on_cpu if p is current, since that would
- * deadlock.
+ * In case the architecture enables interrupts in
+ * context_switch(), we cannot busy wait, since that
+ * would lead to deadlocks when an interrupt hits and
+ * tries to wake up @prev. So bail and do a complete
+ * remote wakeup.
*/
- if (p == current) {
- ttwu_queue(p, cpu);
+ if (ttwu_activate_remote(p, wake_flags))
goto stat;
- }
-#endif
+#else
cpu_relax();
+#endif
}
/*
* Pairs with the smp_wmb() in finish_lock_switch().
@@ -5841,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
- cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+ do_set_cpus_allowed(idle, cpumask_of(cpu));
/*
* We're having a chicken and egg problem, even though we are
* holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5929,6 +5948,16 @@ static inline void sched_init_granularity(void)
}
#ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
+ else {
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+}
+
/*
* This is how migration works:
*
@@ -5974,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
goto out;
}
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, new_mask);
- else {
- cpumask_copy(&p->cpus_allowed, new_mask);
- p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
- }
+ do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), new_mask))
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e32a9b70ee9c..433491c2dc8f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1076,8 +1076,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->on_rq = 0;
update_cfs_load(cfs_rq, 0);
account_entity_dequeue(cfs_rq, se);
- update_min_vruntime(cfs_rq);
- update_cfs_shares(cfs_rq);
/*
* Normalize the entity after updating the min_vruntime because the
@@ -1086,6 +1084,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+
+ update_min_vruntime(cfs_rq);
+ update_cfs_shares(cfs_rq);
}
/*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 64b2a37c07d0..88725c939e0b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1263,6 +1263,7 @@ static int find_lowest_rq(struct task_struct *task)
if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
+ rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
@@ -1272,15 +1273,20 @@ static int find_lowest_rq(struct task_struct *task)
* remote processor.
*/
if (this_cpu != -1 &&
- cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+ cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
+ rcu_read_unlock();
return this_cpu;
+ }
best_cpu = cpumask_first_and(lowest_mask,
sched_domain_span(sd));
- if (best_cpu < nr_cpu_ids)
+ if (best_cpu < nr_cpu_ids) {
+ rcu_read_unlock();
return best_cpu;
+ }
}
}
+ rcu_read_unlock();
/*
* And finally, if there were no matches within the domains
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 48ddf431db0e..331e01bcd026 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -37,7 +37,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
#ifdef CONFIG_SMP
/* domain-specific stats */
- preempt_disable();
+ rcu_read_lock();
for_each_domain(cpu, sd) {
enum cpu_idle_type itype;
@@ -64,7 +64,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->ttwu_wake_remote, sd->ttwu_move_affine,
sd->ttwu_move_balance);
}
- preempt_enable();
+ rcu_read_unlock();
#endif
}
kfree(mask_str);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d017c2c82c44..1ee417fcbfa5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -109,12 +109,18 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
static void ftrace_global_list_func(unsigned long ip,
unsigned long parent_ip)
{
- struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
+ struct ftrace_ops *op;
+
+ if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+ return;
+ trace_recursion_set(TRACE_GLOBAL_BIT);
+ op = rcu_dereference_raw(ftrace_global_list); /*see above*/
while (op != &ftrace_list_end) {
op->func(ip, parent_ip);
op = rcu_dereference_raw(op->next); /*see above*/
};
+ trace_recursion_clear(TRACE_GLOBAL_BIT);
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
@@ -1638,12 +1644,12 @@ static void ftrace_startup_enable(int command)
ftrace_run_update_code(command);
}
-static void ftrace_startup(struct ftrace_ops *ops, int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
{
bool hash_enable = true;
if (unlikely(ftrace_disabled))
- return;
+ return -ENODEV;
ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;
@@ -1662,6 +1668,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
ftrace_hash_rec_enable(ops, 1);
ftrace_startup_enable(command);
+
+ return 0;
}
static void ftrace_shutdown(struct ftrace_ops *ops, int command)
@@ -2501,7 +2509,7 @@ static void __enable_ftrace_function_probe(void)
ret = __register_ftrace_function(&trace_probe_ops);
if (!ret)
- ftrace_startup(&trace_probe_ops, 0);
+ ret = ftrace_startup(&trace_probe_ops, 0);
ftrace_probe_registered = 1;
}
@@ -3466,7 +3474,11 @@ device_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command) do { } while (0)
+# define ftrace_startup(ops, command) \
+ ({ \
+ (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+ 0; \
+ })
# define ftrace_shutdown(ops, command) do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
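Editor's note: the replacement stub uses a GCC statement expression so that ftrace_startup() still evaluates to 0 (success) in the !CONFIG_DYNAMIC_FTRACE case while also setting the ENABLED flag, matching the function's new int return type. A small stand-alone illustration of the statement-expression idiom (example names only, compiles with GCC):

#include <stdio.h>

/* ({ ... }) is a GCC extension: the block's last expression is its value. */
#define enable_and_report(flags) \
	({ \
		(flags) |= 0x1;	/* side effect, like FTRACE_OPS_FL_ENABLED */ \
		0;		/* value of the whole expression: "success" */ \
	})

int main(void)
{
	unsigned int flags = 0;
	int ret = enable_and_report(flags);

	printf("ret=%d flags=%#x\n", ret, flags); /* prints ret=0 flags=0x1 */
	return 0;
}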
@@ -3484,6 +3496,10 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op;
+ if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+ return;
+
+ trace_recursion_set(TRACE_INTERNAL_BIT);
/*
* Some of the ops may be dynamically allocated,
* they must be freed after a synchronize_sched().
@@ -3496,6 +3512,7 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
op = rcu_dereference_raw(op->next);
};
preempt_enable_notrace();
+ trace_recursion_clear(TRACE_INTERNAL_BIT);
}
static void clear_ftrace_swapper(void)
@@ -3799,7 +3816,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
ret = __register_ftrace_function(ops);
if (!ret)
- ftrace_startup(ops, 0);
+ ret = ftrace_startup(ops, 0);
out_unlock:
@@ -4045,7 +4062,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_graph_return = retfunc;
ftrace_graph_entry = entryfunc;
- ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+ ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
out:
mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0ef7b4b2a1f7..b0c7aa407943 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2216,7 +2216,7 @@ static noinline void trace_recursive_fail(void)
printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
"HC[%lu]:SC[%lu]:NMI[%lu]\n",
- current->trace_recursion,
+ trace_recursion_buffer(),
hardirq_count() >> HARDIRQ_SHIFT,
softirq_count() >> SOFTIRQ_SHIFT,
in_nmi());
@@ -2226,9 +2226,9 @@ static noinline void trace_recursive_fail(void)
static inline int trace_recursive_lock(void)
{
- current->trace_recursion++;
+ trace_recursion_inc();
- if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+ if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
return 0;
trace_recursive_fail();
@@ -2238,9 +2238,9 @@ static inline int trace_recursive_lock(void)
static inline void trace_recursive_unlock(void)
{
- WARN_ON_ONCE(!current->trace_recursion);
+ WARN_ON_ONCE(!trace_recursion_buffer());
- current->trace_recursion--;
+ trace_recursion_dec();
}
#else
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6b69c4bd306f..229f8591f61d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -784,4 +784,19 @@ extern const char *__stop___trace_bprintk_fmt[];
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT (1<<11)
+#define TRACE_GLOBAL_BIT (1<<12)
+
+#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
+
#endif /* _LINUX_KERNEL_TRACE_H */
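Editor's note: these per-task bits back the recursion guards added earlier in the patch (ftrace_global_list_func and ftrace_ops_list_func): test the bit and bail out if it is already set, otherwise set it, do the work, and clear it on the way out. A userspace analogue of the same pattern, with a plain variable standing in for current->trace_recursion (names invented for the example):

#include <stdio.h>

#define TRACE_EXAMPLE_BIT (1 << 11)

static unsigned long trace_recursion; /* stands in for current->trace_recursion */

static void traced_work(int depth)
{
	/* Re-entry protection: refuse to recurse into ourselves. */
	if (trace_recursion & TRACE_EXAMPLE_BIT) {
		printf("recursion detected at depth %d, bailing out\n", depth);
		return;
	}
	trace_recursion |= TRACE_EXAMPLE_BIT;

	printf("doing work at depth %d\n", depth);
	if (depth < 1)
		traced_work(depth + 1); /* stands in for an indirect re-entry */

	trace_recursion &= ~TRACE_EXAMPLE_BIT;
}

int main(void)
{
	traced_work(0);
	return 0;
}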
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2fe110341359..686ec399f2a8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1657,7 +1657,12 @@ static struct ftrace_ops trace_ops __initdata =
static __init void event_trace_self_test_with_function(void)
{
- register_ftrace_function(&trace_ops);
+ int ret;
+ ret = register_ftrace_function(&trace_ops);
+ if (WARN_ON(ret < 0)) {
+ pr_info("Failed to enable function tracer for event tests\n");
+ return;
+ }
pr_info("Running tests again, along with the function tracer\n");
event_trace_self_tests();
unregister_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index cf535ccedc86..e37de492a9e1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -353,6 +353,33 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
+#if BITS_PER_LONG == 32
+const char *
+ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+ const struct trace_print_flags_u64 *symbol_array)
+{
+ int i;
+ const char *ret = p->buffer + p->len;
+
+ for (i = 0; symbol_array[i].name; i++) {
+
+ if (val != symbol_array[i].mask)
+ continue;
+
+ trace_seq_puts(p, symbol_array[i].name);
+ break;
+ }
+
+ if (!p->len)
+ trace_seq_printf(p, "0x%llx", val);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+#endif
+
const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7daa4b072e9f..3d0c56ad4792 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -415,15 +415,13 @@ static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/* prepare/enable/disable routines */
-static int watchdog_prepare_cpu(int cpu)
+static void watchdog_prepare_cpu(int cpu)
{
struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);
WARN_ON(per_cpu(softlockup_watchdog, cpu));
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
-
- return 0;
}
static int watchdog_enable(int cpu)
@@ -542,17 +540,16 @@ static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
- int err = 0;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- err = watchdog_prepare_cpu(hotcpu);
+ watchdog_prepare_cpu(hotcpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
if (watchdog_enabled)
- err = watchdog_enable(hotcpu);
+ watchdog_enable(hotcpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 619313ed6c46..507a22fab738 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -144,7 +144,7 @@ static void init_shared_classes(void)
#define HARDIRQ_ENTER() \
local_irq_disable(); \
- irq_enter(); \
+ __irq_enter(); \
WARN_ON(!in_irq());
#define HARDIRQ_EXIT() \
diff --git a/mm/filemap.c b/mm/filemap.c
index bcdc393b6580..d7b10578a64b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1982,16 +1982,26 @@ static int __remove_suid(struct dentry *dentry, int kill)
int file_remove_suid(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
- int killsuid = should_remove_suid(dentry);
- int killpriv = security_inode_need_killpriv(dentry);
+ struct inode *inode = dentry->d_inode;
+ int killsuid;
+ int killpriv;
int error = 0;
+ /* Fast path for nothing security related */
+ if (IS_NOSEC(inode))
+ return 0;
+
+ killsuid = should_remove_suid(dentry);
+ killpriv = security_inode_need_killpriv(dentry);
+
if (killpriv < 0)
return killpriv;
if (killpriv)
error = security_inode_killpriv(dentry);
if (!error && killsuid)
error = __remove_suid(dentry, killsuid);
+ if (!error)
+ inode->i_flags |= S_NOSEC;
return error;
}
@@ -2327,7 +2337,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
repeat:
page = find_lock_page(mapping, index);
if (page)
- return page;
+ goto found;
page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
if (!page)
@@ -2340,6 +2350,8 @@ repeat:
goto repeat;
return NULL;
}
+found:
+ wait_on_page_writeback(page);
return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
diff --git a/mm/maccess.c b/mm/maccess.c
index e2b6f5634e0d..4cee182ab5f3 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -15,10 +15,10 @@
* happens, handle that and return -EFAULT.
*/
-long __weak probe_kernel_read(void *dst, void *src, size_t size)
+long __weak probe_kernel_read(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_read")));
-long __probe_kernel_read(void *dst, void *src, size_t size)
+long __probe_kernel_read(void *dst, const void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-long __weak probe_kernel_write(void *dst, void *src, size_t size)
+long __weak probe_kernel_write(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_write")));
-long __probe_kernel_write(void *dst, void *src, size_t size)
+long __probe_kernel_write(void *dst, const void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
diff --git a/mm/rmap.c b/mm/rmap.c
index 3a39b518a653..6bada99cd61c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -405,6 +405,7 @@ out:
struct anon_vma *page_lock_anon_vma(struct page *page)
{
struct anon_vma *anon_vma = NULL;
+ struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
@@ -415,13 +416,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- if (mutex_trylock(&anon_vma->root->mutex)) {
+ root_anon_vma = ACCESS_ONCE(anon_vma->root);
+ if (mutex_trylock(&root_anon_vma->mutex)) {
/*
- * If we observe a !0 refcount, then holding the lock ensures
- * the anon_vma will not go away, see __put_anon_vma().
+ * If the page is still mapped, then this anon_vma is still
+ * its anon_vma, and holding the mutex ensures that it will
+ * not go away, see __put_anon_vma().
*/
- if (!atomic_read(&anon_vma->refcount)) {
- anon_vma_unlock(anon_vma);
+ if (!page_mapped(page)) {
+ mutex_unlock(&root_anon_vma->mutex);
anon_vma = NULL;
}
goto out;
@@ -1014,7 +1017,7 @@ void do_page_add_anon_rmap(struct page *page,
return;
VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+ /* address might be in next vma when migration races vma_adjust */
if (first)
__page_set_anon_rmap(page, vma, address, exclusive);
else
@@ -1709,7 +1712,7 @@ void hugepage_add_anon_rmap(struct page *page,
BUG_ON(!PageLocked(page));
BUG_ON(!anon_vma);
- BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+ /* address might be in next vma when migration races vma_adjust */
first = atomic_inc_and_test(&page->_mapcount);
if (first)
__hugepage_set_anon_rmap(page, vma, address, 0);
diff --git a/mm/shmem.c b/mm/shmem.c
index 1acfb2687bfa..d221a1cfd7b1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1114,8 +1114,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
delete_from_page_cache(page);
shmem_swp_set(info, entry, swap.val);
shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
swap_shmem_alloc(swap);
+ spin_unlock(&info->lock);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
return 0;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index b2274d1fd605..c7a581a96894 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -46,8 +46,6 @@ int vlan_net_id __read_mostly;
const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;
-static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
-static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
/* End of global variables definitions. */
@@ -673,8 +671,7 @@ static int __init vlan_proto_init(void)
{
int err;
- pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
- pr_info("All bugs added by %s\n", vlan_buggyright);
+ pr_info("%s v%s\n", vlan_fullname, vlan_version);
err = register_pernet_subsys(&vlan_net_ops);
if (err < 0)
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f7fa67c78766..f49da5814bc3 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev,
return pos - buf;
}
+static ssize_t show_atmindex(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct atm_dev *adev = to_atm_dev(cdev);
+
+ return sprintf(buf, "%d\n", adev->number);
+}
+
static ssize_t show_carrier(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev,
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
+static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
@@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
static struct device_attribute *atm_attrs[] = {
&dev_attr_atmaddress,
&dev_attr_address,
+ &dev_attr_atmindex,
&dev_attr_carrier,
&dev_attr_type,
&dev_attr_link_rate,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 1a92b369c820..2b5ca1a0054d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1883,14 +1883,13 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
struct xt_target *wt;
void *dst = NULL;
int off, pad = 0;
- unsigned int size_kern, entry_offset, match_size = mwt->match_size;
+ unsigned int size_kern, match_size = mwt->match_size;
strlcpy(name, mwt->u.name, sizeof(name));
if (state->buf_kern_start)
dst = state->buf_kern_start + state->buf_kern_offset;
- entry_offset = (unsigned char *) mwt - base;
switch (compat_mwt) {
case EBT_COMPAT_MATCH:
match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
@@ -1933,6 +1932,9 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
size_kern = wt->targetsize;
module_put(wt->me);
break;
+
+ default:
+ return -EINVAL;
}
state->buf_kern_offset += match_size + off;
diff --git a/net/can/proc.c b/net/can/proc.c
index f4265cc9c3fb..0016f7339699 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -204,12 +204,11 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
hlist_for_each_entry_rcu(r, n, rx_list, list) {
char *fmt = (r->can_id & CAN_EFF_FLAG)?
- " %-5s %08X %08x %08x %08x %8ld %s\n" :
- " %-5s %03X %08x %08lx %08lx %8ld %s\n";
+ " %-5s %08x %08x %pK %pK %8ld %s\n" :
+ " %-5s %03x %08x %pK %pK %8ld %s\n";
seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
- (unsigned long)r->func, (unsigned long)r->data,
- r->matches, r->ident);
+ r->func, r->data, r->matches, r->ident);
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 84e7304532e6..fd14116ad7f0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -233,6 +233,29 @@ static int ethtool_set_feature_compat(struct net_device *dev,
return 1;
}
+static int ethtool_set_flags_compat(struct net_device *dev,
+ int (*legacy_set)(struct net_device *, u32),
+ struct ethtool_set_features_block *features, u32 mask)
+{
+ u32 value;
+
+ if (!legacy_set)
+ return 0;
+
+ if (!(features[0].valid & mask))
+ return 0;
+
+ value = dev->features & ~features[0].valid;
+ value |= features[0].requested;
+
+ features[0].valid &= ~mask;
+
+ if (legacy_set(dev, value & mask) < 0)
+ netdev_info(dev, "Legacy flags change failed\n");
+
+ return 1;
+}
+
static int ethtool_set_features_compat(struct net_device *dev,
struct ethtool_set_features_block *features)
{
@@ -249,7 +272,7 @@ static int ethtool_set_features_compat(struct net_device *dev,
features, NETIF_F_ALL_TSO);
compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
features, NETIF_F_RXCSUM);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags,
+ compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
features, flags_dup_features);
return compat;
diff --git a/net/core/filter.c b/net/core/filter.c
index 0e3622f1dcb1..36f975fa87cb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,6 +38,7 @@
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
+#include <linux/ratelimit.h>
/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a829e3f60aeb..77a65f031488 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -17,6 +17,7 @@
#include <net/ip.h>
#include <net/sock.h>
+#include <net/net_ratelimit.h>
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(ctl_table *table, int write,
diff --git a/net/core/utils.c b/net/core/utils.c
index 2012bc797f9c..386e263f6066 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -27,6 +27,7 @@
#include <linux/ratelimit.h>
#include <net/sock.h>
+#include <net/net_ratelimit.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9df4e635fb5f..ce616d92cc54 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
- if (!list_empty(&p->unused)) {
- spin_lock_bh(&unused_peers.lock);
- list_del_init(&p->unused);
- spin_unlock_bh(&unused_peers.lock);
- }
+ spin_lock_bh(&unused_peers.lock);
+ list_del_init(&p->unused);
+ spin_unlock_bh(&unused_peers.lock);
}
static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
u; \
})
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+ int cur, old = atomic_read(ptr);
+
+ while (old != u) {
+ *newv = old + a;
+ cur = atomic_cmpxchg(ptr, old, *newv);
+ if (cur == old)
+ return true;
+ old = cur;
+ }
+ return false;
+}
+
/*
* Called with rcu_read_lock()
* Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
* We exit from this function if number of links exceeds PEER_MAXDEPTH
*/
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
- struct inet_peer_base *base)
+ struct inet_peer_base *base,
+ int *newrefcnt)
{
struct inet_peer *u = rcu_dereference(base->root);
int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
* distinction between an unused entry (refcnt=0) and
* a freed one.
*/
- if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+ if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
u = NULL;
return u;
}
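Editor's note: atomic_add_unless_return(), added above, is a cmpxchg loop that behaves like atomic_add_unless() but also reports the resulting counter value through *newv; inet_getpeer() uses that to unlink the entry from the unused list only when it took the very first reference (newrefcnt == 1). A userspace analogue built on the GCC __atomic builtins, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Add @a to *@ptr unless *@ptr == @u; report the new value via *@newv. */
static bool add_unless_return(int *ptr, int a, int u, int *newv)
{
	int old = __atomic_load_n(ptr, __ATOMIC_RELAXED);

	while (old != u) {
		*newv = old + a;
		/* On failure, __atomic_compare_exchange_n refreshes @old. */
		if (__atomic_compare_exchange_n(ptr, &old, *newv, false,
						__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return true;
	}
	return false;
}

int main(void)
{
	int refcnt = 0, newref = 0;

	if (add_unless_return(&refcnt, 1, -1, &newref) && newref == 1)
		printf("took the first reference (refcnt now %d)\n", refcnt);
	return 0;
}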
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
unsigned int sequence;
- int invalidated;
+ int invalidated, newrefcnt = 0;
/* Look up for the address quickly, lockless.
* Because of a concurrent writer, we might not find an existing entry.
*/
rcu_read_lock();
sequence = read_seqbegin(&base->lock);
- p = lookup_rcu(daddr, base);
+ p = lookup_rcu(daddr, base, &newrefcnt);
invalidated = read_seqretry(&base->lock, sequence);
rcu_read_unlock();
if (p) {
- /* The existing node has been found.
+found: /* The existing node has been found.
* Remove the entry from unused list if it was there.
*/
- unlink_from_unused(p);
+ if (newrefcnt == 1)
+ unlink_from_unused(p);
return p;
}
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
write_seqlock_bh(&base->lock);
p = lookup(daddr, stack, base);
if (p != peer_avl_empty) {
- atomic_inc(&p->refcnt);
+ newrefcnt = atomic_inc_return(&p->refcnt);
write_sequnlock_bh(&base->lock);
- /* Remove the entry from unused list if it was there. */
- unlink_from_unused(p);
- return p;
+ goto found;
}
p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
if (p) {
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a15c01524959..7f9124914b13 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -54,7 +54,7 @@
#include <asm/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/smp.h>
/*
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 72d1ac611fdc..8041befc6555 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -815,7 +815,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
ip_set_id_t i;
if (unlikely(protocol_failed(attr)))
- return -EPROTO;
+ return -IPSET_ERR_PROTOCOL;
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < ip_set_max; i++)
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 6b5dd6ddaae9..af63553fa332 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -411,25 +411,35 @@ static struct ip_vs_app ip_vs_ftp = {
static int __net_init __ip_vs_ftp_init(struct net *net)
{
int i, ret;
- struct ip_vs_app *app = &ip_vs_ftp;
+ struct ip_vs_app *app;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
+ if (!app)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&app->a_list);
+ INIT_LIST_HEAD(&app->incs_list);
+ ipvs->ftp_app = app;
ret = register_ip_vs_app(net, app);
if (ret)
- return ret;
+ goto err_exit;
for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
if (!ports[i])
continue;
ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
if (ret)
- break;
+ goto err_unreg;
pr_info("%s: loaded support on port[%d] = %d\n",
app->name, i, ports[i]);
}
+ return 0;
- if (ret)
- unregister_ip_vs_app(net, app);
-
+err_unreg:
+ unregister_ip_vs_app(net, app);
+err_exit:
+ kfree(ipvs->ftp_app);
return ret;
}
/*
@@ -437,9 +447,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
*/
static void __ip_vs_ftp_exit(struct net *net)
{
- struct ip_vs_app *app = &ip_vs_ftp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- unregister_ip_vs_app(net, app);
+ unregister_ip_vs_app(net, ipvs->ftp_app);
+ kfree(ipvs->ftp_app);
}
static struct pernet_operations ip_vs_ftp_ops = {
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 4be60364a405..f40a6af6bf40 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -43,6 +43,7 @@
#undef ELF_R_INFO
#undef Elf_r_info
#undef ELF_ST_BIND
+#undef ELF_ST_TYPE
#undef fn_ELF_R_SYM
#undef fn_ELF_R_INFO
#undef uint_t
@@ -76,6 +77,7 @@
# define ELF_R_INFO ELF64_R_INFO
# define Elf_r_info Elf64_r_info
# define ELF_ST_BIND ELF64_ST_BIND
+# define ELF_ST_TYPE ELF64_ST_TYPE
# define fn_ELF_R_SYM fn_ELF64_R_SYM
# define fn_ELF_R_INFO fn_ELF64_R_INFO
# define uint_t uint64_t
@@ -108,6 +110,7 @@
# define ELF_R_INFO ELF32_R_INFO
# define Elf_r_info Elf32_r_info
# define ELF_ST_BIND ELF32_ST_BIND
+# define ELF_ST_TYPE ELF32_ST_TYPE
# define fn_ELF_R_SYM fn_ELF32_R_SYM
# define fn_ELF_R_INFO fn_ELF32_R_INFO
# define uint_t uint32_t
@@ -427,6 +430,11 @@ static unsigned find_secsym_ndx(unsigned const txtndx,
if (txtndx == w2(symp->st_shndx)
/* avoid STB_WEAK */
&& (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+ /* function symbols on ARM have quirks, avoid them */
+ if (w2(ehdr->e_machine) == EM_ARM
+ && ELF_ST_TYPE(symp->st_info) == STT_FUNC)
+ continue;
+
*recvalp = _w(symp->st_value);
return symp - sym0;
}
diff --git a/scripts/selinux/README b/scripts/selinux/README
index a936315ba2c8..4d020ecb7524 100644
--- a/scripts/selinux/README
+++ b/scripts/selinux/README
@@ -1,2 +1,2 @@
-Please see Documentation/SELinux.txt for information on
+Please see Documentation/security/SELinux.txt for information on
installing a dummy SELinux policy.
diff --git a/scripts/tags.sh b/scripts/tags.sh
index bd6185d529cf..75c5d24f1993 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -132,7 +132,7 @@ exuberant()
--regex-asm='/^ENTRY\(([^)]*)\).*/\1/' \
--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/' \
- --regex-c++='/^DEFINE_EVENT\(([^,)]*).*/trace_\1/'
+ --regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/'
all_kconfigs | xargs $1 -a \
--langdef=kconfig --language-force=kconfig \
@@ -152,7 +152,9 @@ emacs()
{
all_sources | xargs $1 -a \
--regex='/^ENTRY(\([^)]*\)).*/\1/' \
- --regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'
+ --regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/' \
+ --regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/' \
+ --regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/'
all_kconfigs | xargs $1 -a \
--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'
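
The tags.sh change matters because DEFINE_EVENT()'s first argument is the template and the event name is the second, so the old pattern tagged trace_<template> instead of trace_<event>; the emacs branch also gains the TRACE_EVENT/DEFINE_EVENT patterns the exuberant branch already had. Schematically (hypothetical events, shown only for the argument positions, not compilable on their own):

    /* Schematic only -- these expand via the tracepoint machinery. */
    DEFINE_EVENT(foo_template, foo_start,   /* old regex tagged trace_foo_template */
                 TP_PROTO(int id),          /* new regex tags trace_foo_start      */
                 TP_ARGS(id));

    TRACE_EVENT(foo_done, ...);             /* unchanged: the name is argument 1   */
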
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 06d764ccbbe5..94de6b4907c8 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -194,7 +194,7 @@ void aa_dfa_free_kref(struct kref *kref)
* @flags: flags controlling what type of accept tables are acceptable
*
* Unpack a dfa that has been serialized. To find information on the dfa
- * format look in Documentation/apparmor.txt
+ * format look in Documentation/security/apparmor.txt
* Assumes the dfa @blob stream has been aligned on a 8 byte boundary
*
* Returns: an unpacked dfa ready for matching or ERR_PTR on failure
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index e33aaf7e5744..d6d9a57b5652 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -12,8 +12,8 @@
* published by the Free Software Foundation, version 2 of the
* License.
*
- * AppArmor uses a serialized binary format for loading policy.
- * To find policy format documentation look in Documentation/apparmor.txt
+ * AppArmor uses a serialized binary format for loading policy. To find
+ * policy format documentation look in Documentation/security/apparmor.txt
* All policy is validated before it is used.
*/
diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c
index 69907a58a683..b1cba5bf0a5e 100644
--- a/security/keys/encrypted.c
+++ b/security/keys/encrypted.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
- * See Documentation/keys-trusted-encrypted.txt
+ * See Documentation/security/keys-trusted-encrypted.txt
*/
#include <linux/uaccess.h>
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index b18a71745901..d31862e0aa1c 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
*/
#include <linux/module.h>
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index f6337c9082eb..6cff37529b80 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
*/
#include <linux/module.h>
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index c99b9368368c..0c33e2ea1f3c 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
- * See Documentation/keys-trusted-encrypted.txt
+ * See Documentation/security/keys-trusted-encrypted.txt
*/
#include <linux/uaccess.h>
diff --git a/sound/core/control.c b/sound/core/control.c
index 5d98194bcad5..f8c5be464510 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -704,13 +704,12 @@ static int snd_ctl_elem_list(struct snd_card *card,
struct snd_ctl_elem_list list;
struct snd_kcontrol *kctl;
struct snd_ctl_elem_id *dst, *id;
- unsigned int offset, space, first, jidx;
+ unsigned int offset, space, jidx;
if (copy_from_user(&list, _list, sizeof(list)))
return -EFAULT;
offset = list.offset;
space = list.space;
- first = 0;
/* try limit maximum space */
if (space > 16384)
return -ENOMEM;
diff --git a/sound/core/init.c b/sound/core/init.c
index 30ecad41403c..2c041bb36ab3 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -342,7 +342,6 @@ static const struct file_operations snd_shutdown_f_ops =
int snd_card_disconnect(struct snd_card *card)
{
struct snd_monitor_file *mfile;
- struct file *file;
int err;
if (!card)
@@ -366,8 +365,6 @@ int snd_card_disconnect(struct snd_card *card)
spin_lock(&card->files_lock);
list_for_each_entry(mfile, &card->files_list, list) {
- file = mfile->file;
-
/* it's critical part, use endless loop */
/* we have no room to fail */
mfile->disconnected_f_op = mfile->file->f_op;
diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c
index 13b3f6f49fae..2045697f449d 100644
--- a/sound/core/oss/linear.c
+++ b/sound/core/oss/linear.c
@@ -90,11 +90,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
struct snd_pcm_plugin_channel *dst_channels,
snd_pcm_uframes_t frames)
{
- struct linear_priv *data;
-
if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
return -ENXIO;
- data = (struct linear_priv *)plugin->extra_data;
if (frames == 0)
return 0;
#ifdef CONFIG_SND_DEBUG
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index abfeff1611ce..f1341308beda 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1756,8 +1756,18 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
wait_queue_t wait;
int err = 0;
snd_pcm_uframes_t avail = 0;
- long tout;
-
+ long wait_time, tout;
+
+ if (runtime->no_period_wakeup)
+ wait_time = MAX_SCHEDULE_TIMEOUT;
+ else {
+ wait_time = 10;
+ if (runtime->rate) {
+ long t = runtime->period_size * 2 / runtime->rate;
+ wait_time = max(t, wait_time);
+ }
+ wait_time = msecs_to_jiffies(wait_time * 1000);
+ }
init_waitqueue_entry(&wait, current);
add_wait_queue(&runtime->tsleep, &wait);
for (;;) {
@@ -1765,9 +1775,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
err = -ERESTARTSYS;
break;
}
- set_current_state(TASK_INTERRUPTIBLE);
snd_pcm_stream_unlock_irq(substream);
- tout = schedule_timeout(msecs_to_jiffies(10000));
+ tout = schedule_timeout_interruptible(wait_time);
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SUSPENDED:
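
This wait_for_avail() hunk, and the matching snd_pcm_drain() hunk just below, replace the fixed 10-second schedule_timeout() with a stream-derived timeout: sleep without limit when period wakeups are disabled, otherwise wait at least 10 seconds or two periods of audio, whichever is longer. period_size is in frames and rate in frames per second, so period_size * 2 / rate is whole seconds, converted with msecs_to_jiffies(sec * 1000). A worked example with assumed figures:

    #include <stdio.h>

    /* Assumed stream parameters, purely illustrative. */
    #define PERIOD_SIZE 65536UL   /* frames per period */
    #define RATE         8000UL   /* frames per second */

    int main(void)
    {
        unsigned long wait_sec = 10;                          /* 10 s floor */
        unsigned long two_periods = PERIOD_SIZE * 2 / RATE;   /* 16 s here  */

        if (two_periods > wait_sec)
            wait_sec = two_periods;
        printf("timeout = %lu ms\n", wait_sec * 1000);        /* 16000 ms   */
        return 0;
    }
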
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 1a07750f3836..1c6be91dfb98 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1481,11 +1481,20 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
break; /* all drained */
init_waitqueue_entry(&wait, current);
add_wait_queue(&to_check->sleep, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
snd_pcm_stream_unlock_irq(substream);
up_read(&snd_pcm_link_rwsem);
snd_power_unlock(card);
- tout = schedule_timeout(10 * HZ);
+ if (runtime->no_period_wakeup)
+ tout = MAX_SCHEDULE_TIMEOUT;
+ else {
+ tout = 10;
+ if (runtime->rate) {
+ long t = runtime->period_size * 2 / runtime->rate;
+ tout = max(t, tout);
+ }
+ tout = msecs_to_jiffies(tout * 1000);
+ }
+ tout = schedule_timeout_interruptible(tout);
snd_power_lock(card);
down_read(&snd_pcm_link_rwsem);
snd_pcm_stream_lock_irq(substream);
@@ -1518,13 +1527,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
static int snd_pcm_drop(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
- struct snd_card *card;
int result = 0;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
- card = substream->pcm->card;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
@@ -2056,7 +2063,6 @@ static int snd_pcm_open_file(struct file *file,
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
- struct snd_pcm_str *str;
int err;
if (rpcm_file)
@@ -2073,7 +2079,6 @@ static int snd_pcm_open_file(struct file *file,
}
pcm_file->substream = substream;
if (substream->ref_count == 1) {
- str = substream->pstr;
substream->file = pcm_file;
substream->pcm_release = pcm_release_private;
}
@@ -3015,11 +3020,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_status =
static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
struct vm_area_struct *area)
{
- struct snd_pcm_runtime *runtime;
long size;
if (!(area->vm_flags & VM_READ))
return -EINVAL;
- runtime = substream->runtime;
size = area->vm_end - area->vm_start;
if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
return -EINVAL;
@@ -3054,11 +3057,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_control =
static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
struct vm_area_struct *area)
{
- struct snd_pcm_runtime *runtime;
long size;
if (!(area->vm_flags & VM_READ))
return -EINVAL;
- runtime = substream->runtime;
size = area->vm_end - area->vm_start;
if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
return -EINVAL;
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index e7a8e9e4edb2..f9077361c119 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -467,13 +467,11 @@ int snd_seq_queue_timer_open(int queueid)
int snd_seq_queue_timer_close(int queueid)
{
struct snd_seq_queue *queue;
- struct snd_seq_timer *tmr;
int result = 0;
queue = queueptr(queueid);
if (queue == NULL)
return -EINVAL;
- tmr = queue->timer;
snd_seq_timer_close(queue);
queuefree(queue);
return result;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 8edd998509f7..45b4a8d70e08 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4719,7 +4719,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
snd_printd(" inputs:");
for (i = 0; i < cfg->num_inputs; i++) {
- snd_printdd(" %s=0x%x",
+ snd_printd(" %s=0x%x",
hda_get_autocfg_input_label(codec, cfg, i),
cfg->inputs[i].pin);
}
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 74b0560289c0..b05f7be9dc1b 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -312,23 +312,6 @@ out_fail:
return -EINVAL;
}
-static int hdmi_eld_valid(struct hda_codec *codec, hda_nid_t nid)
-{
- int eldv;
- int present;
-
- present = snd_hda_pin_sense(codec, nid);
- eldv = (present & AC_PINSENSE_ELDV);
- present = (present & AC_PINSENSE_PRESENCE);
-
-#ifdef CONFIG_SND_DEBUG_VERBOSE
- printk(KERN_INFO "HDMI: sink_present = %d, eld_valid = %d\n",
- !!present, !!eldv);
-#endif
-
- return eldv && present;
-}
-
int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid)
{
return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_DIP_SIZE,
@@ -343,7 +326,7 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
int size;
unsigned char *buf;
- if (!hdmi_eld_valid(codec, nid))
+ if (!eld->eld_valid)
return -ENOENT;
size = snd_hdmi_get_eld_size(codec, nid);
@@ -477,6 +460,8 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
snd_iprintf(buffer, "monitor_present\t\t%d\n", e->monitor_present);
snd_iprintf(buffer, "eld_valid\t\t%d\n", e->eld_valid);
+ if (!e->eld_valid)
+ return;
snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
snd_iprintf(buffer, "connection_type\t\t%s\n",
eld_connection_type_names[e->conn_type]);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 43a036716d25..486f6deb3eee 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -391,6 +391,7 @@ struct azx {
/* chip type specific */
int driver_type;
+ unsigned int driver_caps;
int playback_streams;
int playback_index_offset;
int capture_streams;
@@ -464,6 +465,34 @@ enum {
AZX_NUM_DRIVERS, /* keep this as last entry */
};
+/* driver quirks (capabilities) */
+/* bits 0-7 are used for indicating driver type */
+#define AZX_DCAPS_NO_TCSEL (1 << 8) /* No Intel TCSEL bit */
+#define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */
+#define AZX_DCAPS_ATI_SNOOP (1 << 10) /* ATI snoop enable */
+#define AZX_DCAPS_NVIDIA_SNOOP (1 << 11) /* Nvidia snoop enable */
+#define AZX_DCAPS_SCH_SNOOP (1 << 12) /* SCH/PCH snoop enable */
+#define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
+#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
+#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
+#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
+#define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */
+#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
+#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
+
+/* quirks for ATI SB / AMD Hudson */
+#define AZX_DCAPS_PRESET_ATI_SB \
+ (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
+ AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+
+/* quirks for ATI/AMD HDMI */
+#define AZX_DCAPS_PRESET_ATI_HDMI \
+ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+
+/* quirks for Nvidia */
+#define AZX_DCAPS_PRESET_NVIDIA \
+ (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI)
+
static char *driver_short_names[] __devinitdata = {
[AZX_DRIVER_ICH] = "HDA Intel",
[AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -566,7 +595,7 @@ static void azx_init_cmd_io(struct azx *chip)
/* reset the rirb hw write pointer */
azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
/* set N=1, get RIRB response interrupt for new entry */
- if (chip->driver_type == AZX_DRIVER_CTX)
+ if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
azx_writew(chip, RINTCNT, 0xc0);
else
azx_writew(chip, RINTCNT, 1);
@@ -1056,19 +1085,24 @@ static void azx_init_pci(struct azx *chip)
* codecs.
* The PCI register TCSEL is defined in the Intel manuals.
*/
- if (chip->driver_type != AZX_DRIVER_ATI &&
- chip->driver_type != AZX_DRIVER_ATIHDMI)
+ if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) {
+ snd_printdd(SFX "Clearing TCSEL\n");
update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
+ }
- switch (chip->driver_type) {
- case AZX_DRIVER_ATI:
- /* For ATI SB450 azalia HD audio, we need to enable snoop */
+ /* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio,
+ * we need to enable snoop.
+ */
+ if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
+ snd_printdd(SFX "Enabling ATI snoop\n");
update_pci_byte(chip->pci,
ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
- break;
- case AZX_DRIVER_NVIDIA:
- /* For NVIDIA HDA, enable snoop */
+ }
+
+ /* For NVIDIA HDA, enable snoop */
+ if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
+ snd_printdd(SFX "Enabling Nvidia snoop\n");
update_pci_byte(chip->pci,
NVIDIA_HDA_TRANSREG_ADDR,
0x0f, NVIDIA_HDA_ENABLE_COHBITS);
@@ -1078,9 +1112,10 @@ static void azx_init_pci(struct azx *chip)
update_pci_byte(chip->pci,
NVIDIA_HDA_OSTRM_COH,
0x01, NVIDIA_HDA_ENABLE_COHBIT);
- break;
- case AZX_DRIVER_SCH:
- case AZX_DRIVER_PCH:
+ }
+
+ /* Enable SCH/PCH snoop if needed */
+ if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) {
pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
@@ -1091,14 +1126,6 @@ static void azx_init_pci(struct azx *chip)
(snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
? "Failed" : "OK");
}
- break;
- default:
- /* AMD Hudson needs the similar snoop, as it seems... */
- if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
- update_pci_byte(chip->pci,
- ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
- 0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
- break;
}
}
@@ -1152,7 +1179,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
status = azx_readb(chip, RIRBSTS);
if (status & RIRB_INT_MASK) {
if (status & RIRB_INT_RESPONSE) {
- if (chip->driver_type == AZX_DRIVER_CTX)
+ if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
udelay(80);
azx_update_rirb(chip);
}
@@ -1421,8 +1448,10 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
if (err < 0)
return err;
- if (chip->driver_type == AZX_DRIVER_NVIDIA)
+ if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
+ snd_printd(SFX "Enable delay in RIRB handling\n");
chip->bus->needs_damn_long_delay = 1;
+ }
codecs = 0;
max_slots = azx_max_codecs[chip->driver_type];
@@ -1457,9 +1486,8 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
* sequence like the pin-detection. It seems that forcing the synced
* access works around the stall. Grrr...
*/
- if (chip->pci->vendor == PCI_VENDOR_ID_AMD ||
- chip->pci->vendor == PCI_VENDOR_ID_ATI) {
- snd_printk(KERN_INFO SFX "Enable sync_write for AMD chipset\n");
+ if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
+ snd_printd(SFX "Enable sync_write for stable communication\n");
chip->bus->sync_write = 1;
chip->bus->allow_bus_reset = 1;
}
@@ -1720,7 +1748,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
stream_tag = azx_dev->stream_tag;
/* CA-IBG chips need the playback stream starting from 1 */
- if (chip->driver_type == AZX_DRIVER_CTX &&
+ if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
stream_tag > chip->capture_streams)
stream_tag -= chip->capture_streams;
return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
@@ -2365,20 +2393,14 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
}
/* Check VIA/ATI HD Audio Controller exist */
- switch (chip->driver_type) {
- case AZX_DRIVER_VIA:
- /* Use link position directly, avoid any transfer problem. */
+ if (chip->driver_caps & AZX_DCAPS_POSFIX_VIA) {
+ snd_printd(SFX "Using VIACOMBO position fix\n");
return POS_FIX_VIACOMBO;
- case AZX_DRIVER_ATI:
- /* ATI chipsets don't work well with position-buffer */
+ }
+ if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
+ snd_printd(SFX "Using LPIB position fix\n");
return POS_FIX_LPIB;
- case AZX_DRIVER_GENERIC:
- /* AMD chipsets also don't work with position-buffer */
- if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
- return POS_FIX_LPIB;
- break;
}
-
return POS_FIX_AUTO;
}
@@ -2460,8 +2482,8 @@ static void __devinit check_msi(struct azx *chip)
}
/* NVidia chipsets seem to cause troubles with MSI */
- if (chip->driver_type == AZX_DRIVER_NVIDIA) {
- printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
+ if (chip->driver_caps & AZX_DCAPS_NO_MSI) {
+ printk(KERN_INFO "hda_intel: Disabling MSI\n");
chip->msi = 0;
}
}
@@ -2471,7 +2493,7 @@ static void __devinit check_msi(struct azx *chip)
* constructor
*/
static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
- int dev, int driver_type,
+ int dev, unsigned int driver_caps,
struct azx **rchip)
{
struct azx *chip;
@@ -2499,7 +2521,8 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip->card = card;
chip->pci = pci;
chip->irq = -1;
- chip->driver_type = driver_type;
+ chip->driver_caps = driver_caps;
+ chip->driver_type = driver_caps & 0xff;
check_msi(chip);
chip->dev_index = dev;
INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
@@ -2563,8 +2586,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
/* disable SB600 64bit support for safety */
- if ((chip->driver_type == AZX_DRIVER_ATI) ||
- (chip->driver_type == AZX_DRIVER_ATIHDMI)) {
+ if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
struct pci_dev *p_smbus;
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
@@ -2574,19 +2596,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
gcap &= ~ICH6_GCAP_64OK;
pci_dev_put(p_smbus);
}
- } else {
- /* FIXME: not sure whether this is really needed, but
- * Hudson isn't stable enough for allowing everything...
- * let's check later again.
- */
- if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
- gcap &= ~ICH6_GCAP_64OK;
}
- /* disable 64bit DMA address for Teradici */
- /* it does not work with device 6549:1200 subsys e4a2:040b */
- if (chip->driver_type == AZX_DRIVER_TERA)
+ /* disable 64bit DMA address on some devices */
+ if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
+ snd_printd(SFX "Disabling 64bit DMA\n");
gcap &= ~ICH6_GCAP_64OK;
+ }
/* allow 64bit DMA address if supported by H/W */
if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
@@ -2788,38 +2804,62 @@ static void __devexit azx_remove(struct pci_dev *pci)
/* PCI IDs */
static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* CPT */
- { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+ { PCI_DEVICE(0x8086, 0x1c20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
/* PBG */
- { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
+ { PCI_DEVICE(0x8086, 0x1d20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
/* Panther Point */
- { PCI_DEVICE(0x8086, 0x1e20), .driver_data = AZX_DRIVER_PCH },
+ { PCI_DEVICE(0x8086, 0x1e20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
/* SCH */
- { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
+ { PCI_DEVICE(0x8086, 0x811b),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP },
/* Generic Intel */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_ICH },
- /* ATI SB 450/600 */
- { PCI_DEVICE(0x1002, 0x437b), .driver_data = AZX_DRIVER_ATI },
- { PCI_DEVICE(0x1002, 0x4383), .driver_data = AZX_DRIVER_ATI },
+ /* ATI SB 450/600/700/800/900 */
+ { PCI_DEVICE(0x1002, 0x437b),
+ .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
+ { PCI_DEVICE(0x1002, 0x4383),
+ .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
+ /* AMD Hudson */
+ { PCI_DEVICE(0x1022, 0x780d),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* ATI HDMI */
- { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0x7919), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0x960f), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0x970f), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa00), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa08), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa10), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa18), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa20), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa28), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa30), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa38), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa40), .driver_data = AZX_DRIVER_ATIHDMI },
- { PCI_DEVICE(0x1002, 0xaa48), .driver_data = AZX_DRIVER_ATIHDMI },
+ { PCI_DEVICE(0x1002, 0x793b),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0x7919),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0x960f),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0x970f),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa00),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa08),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa10),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa18),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa20),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa28),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa30),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa38),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa40),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa48),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
/* VIA VT8251/VT8237A */
- { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
+ { PCI_DEVICE(0x1106, 0x3288),
+ .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
/* SIS966 */
{ PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS },
/* ULI M5461 */
@@ -2828,9 +2868,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
- .driver_data = AZX_DRIVER_NVIDIA },
+ .driver_data = AZX_DRIVER_NVIDIA | AZX_DCAPS_PRESET_NVIDIA },
/* Teradici */
- { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA },
+ { PCI_DEVICE(0x6549, 0x1200),
+ .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
/* Creative X-Fi (CA0110-IBG) */
#if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
/* the following entry conflicts with snd-ctxfi driver,
@@ -2840,10 +2881,13 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
- .driver_data = AZX_DRIVER_CTX },
+ .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+ AZX_DCAPS_RIRB_PRE_DELAY },
#else
/* this entry seems still valid -- i.e. without emu20kx chip */
- { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX },
+ { PCI_DEVICE(0x1102, 0x0009),
+ .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+ AZX_DCAPS_RIRB_PRE_DELAY },
#endif
/* Vortex86MX */
{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
@@ -2853,11 +2897,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
- .driver_data = AZX_DRIVER_GENERIC },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
- .driver_data = AZX_DRIVER_GENERIC },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, azx_ids);
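
The hda_intel changes fold the per-chipset special cases into a capability bitmask: the PCI table now ORs AZX_DCAPS_* flags (or a preset) into .driver_data, azx_create() keeps the whole word as driver_caps and recovers the legacy driver type from the low byte, and the init/irq/position-fix paths test flags instead of switching on driver type. A small encode/decode sketch using the constants from the hunk (the AZX_DRIVER_ATI value is an assumption):

    #include <stdio.h>

    /* Flag values copied from the hunk; the driver-type value is assumed. */
    #define AZX_DRIVER_ATI        3
    #define AZX_DCAPS_NO_TCSEL    (1 << 8)
    #define AZX_DCAPS_ATI_SNOOP   (1 << 10)
    #define AZX_DCAPS_POSFIX_LPIB (1 << 16)
    #define AZX_DCAPS_SYNC_WRITE  (1 << 19)
    #define AZX_DCAPS_PRESET_ATI_SB \
        (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
         AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)

    int main(void)
    {
        unsigned int driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB;

        unsigned int driver_type = driver_data & 0xff;   /* as in azx_create() */
        unsigned int driver_caps = driver_data;

        printf("type=%u ati_snoop=%d tcsel_cleared=%d\n",
               driver_type,
               !!(driver_caps & AZX_DCAPS_ATI_SNOOP),
               !(driver_caps & AZX_DCAPS_NO_TCSEL));
        return 0;
    }
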
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index f1b3875c57df..696ac2590307 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3159,6 +3159,7 @@ static const struct snd_pci_quirk ad1988_cfg_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG),
SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG),
SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG),
+ SND_PCI_QUIRK(0x1043, 0x82c0, "Asus M3N-HT Deluxe", AD1988_6STACK_DIG),
SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG),
{}
};
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4f37477d3c71..3e6b9a8539c2 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3098,7 +3098,9 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
+ SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
{}
};
@@ -3433,7 +3435,9 @@ static void cx_auto_parse_output(struct hda_codec *codec)
break;
}
}
- if (spec->auto_mute && cfg->line_out_pins[0] &&
+ if (spec->auto_mute &&
+ cfg->line_out_pins[0] &&
+ cfg->line_out_type != AUTO_PIN_SPEAKER_OUT &&
cfg->line_out_pins[0] != cfg->hp_pins[0] &&
cfg->line_out_pins[0] != cfg->speaker_pins[0]) {
for (i = 0; i < cfg->line_outs; i++) {
@@ -3481,25 +3485,32 @@ static void cx_auto_update_speakers(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
- int on;
+ int on = 1;
- if (!spec->auto_mute)
- on = 0;
- else
- on = spec->hp_present | spec->line_present;
+ /* turn on HP EAPD when HP jacks are present */
+ if (spec->auto_mute)
+ on = spec->hp_present;
cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on);
- do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, !on);
+ /* mute speakers in auto-mode if HP or LO jacks are plugged */
+ if (spec->auto_mute)
+ on = !(spec->hp_present ||
+ (spec->detect_line && spec->line_present));
+ do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, on);
/* toggle line-out mutes if needed, too */
/* if LO is a copy of either HP or Speaker, don't need to handle it */
if (cfg->line_out_pins[0] == cfg->hp_pins[0] ||
cfg->line_out_pins[0] == cfg->speaker_pins[0])
return;
- if (!spec->automute_lines || !spec->auto_mute)
- on = 0;
- else
- on = spec->hp_present;
- do_automute(codec, cfg->line_outs, cfg->line_out_pins, !on);
+ if (spec->auto_mute) {
+ /* mute LO in auto-mode when HP jack is present */
+ if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT ||
+ spec->automute_lines)
+ on = !spec->hp_present;
+ else
+ on = 1;
+ }
+ do_automute(codec, cfg->line_outs, cfg->line_out_pins, on);
}
static void cx_auto_hp_automute(struct hda_codec *codec)
@@ -3696,13 +3707,14 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
{
struct conexant_spec *spec = codec->spec;
hda_nid_t adc;
+ int changed = 1;
if (!imux->num_items)
return 0;
if (idx >= imux->num_items)
idx = imux->num_items - 1;
if (spec->cur_mux[0] == idx)
- return 0;
+ changed = 0;
adc = spec->imux_info[idx].adc;
select_input_connection(codec, spec->imux_info[idx].adc,
spec->imux_info[idx].pin);
@@ -3715,7 +3727,7 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
spec->cur_adc_format);
}
spec->cur_mux[0] = idx;
- return 1;
+ return changed;
}
static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol,
@@ -3789,7 +3801,7 @@ static void cx_auto_check_auto_mic(struct hda_codec *codec)
int pset[INPUT_PIN_ATTR_NORMAL + 1];
int i;
- for (i = 0; i < INPUT_PIN_ATTR_NORMAL; i++)
+ for (i = 0; i < ARRAY_SIZE(pset); i++)
pset[i] = -1;
for (i = 0; i < spec->private_imux.num_items; i++) {
hda_nid_t pin = spec->imux_info[i].pin;
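
The last Conexant hunk is an off-by-one fix: pset[] has INPUT_PIN_ATTR_NORMAL + 1 elements, but the init loop stopped at i < INPUT_PIN_ATTR_NORMAL, leaving the final slot uninitialized; iterating over ARRAY_SIZE(pset) covers the whole array. The macro is the usual sizeof ratio (the kernel version adds a must-be-array check), and the enum value below is only an assumption for illustration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))  /* same idea as the kernel macro */
    #define INPUT_PIN_ATTR_NORMAL 4                     /* assumed value, illustrative   */

    int main(void)
    {
        int pset[INPUT_PIN_ATTR_NORMAL + 1];
        size_t i;

        for (i = 0; i < ARRAY_SIZE(pset); i++)  /* covers all 5 slots, not just 4 */
            pset[i] = -1;

        printf("entries initialised: %zu\n", ARRAY_SIZE(pset));
        return 0;
    }
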
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 322901873222..bd0ae697f9c4 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -48,8 +48,8 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
*
* The HDA correspondence of pipes/ports are converter/pin nodes.
*/
-#define MAX_HDMI_CVTS 3
-#define MAX_HDMI_PINS 3
+#define MAX_HDMI_CVTS 4
+#define MAX_HDMI_PINS 4
struct hdmi_spec {
int num_cvts;
@@ -78,10 +78,6 @@ struct hdmi_spec {
*/
struct hda_multi_out multiout;
const struct hda_pcm_stream *pcm_playback;
-
- /* misc flags */
- /* PD bit indicates only the update, not the current state */
- unsigned int old_pin_detect:1;
};
@@ -300,13 +296,6 @@ static int hda_node_index(hda_nid_t *nids, hda_nid_t nid)
return -EINVAL;
}
-static void hdmi_get_show_eld(struct hda_codec *codec, hda_nid_t pin_nid,
- struct hdmi_eld *eld)
-{
- if (!snd_hdmi_get_eld(eld, codec, pin_nid))
- snd_hdmi_show_eld(eld);
-}
-
#ifdef BE_PARANOID
static void hdmi_get_dip_index(struct hda_codec *codec, hda_nid_t pin_nid,
int *packet_index, int *byte_index)
@@ -694,35 +683,20 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
{
struct hdmi_spec *spec = codec->spec;
- int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
- int pind = !!(res & AC_UNSOL_RES_PD);
+ int pin_nid = res >> AC_UNSOL_RES_TAG_SHIFT;
+ int pd = !!(res & AC_UNSOL_RES_PD);
int eldv = !!(res & AC_UNSOL_RES_ELDV);
int index;
printk(KERN_INFO
"HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
- tag, pind, eldv);
+ pin_nid, pd, eldv);
- index = hda_node_index(spec->pin, tag);
+ index = hda_node_index(spec->pin, pin_nid);
if (index < 0)
return;
- if (spec->old_pin_detect) {
- if (pind)
- hdmi_present_sense(codec, tag, &spec->sink_eld[index]);
- pind = spec->sink_eld[index].monitor_present;
- }
-
- spec->sink_eld[index].monitor_present = pind;
- spec->sink_eld[index].eld_valid = eldv;
-
- if (pind && eldv) {
- hdmi_get_show_eld(codec, spec->pin[index],
- &spec->sink_eld[index]);
- /* TODO: do real things about ELD */
- }
-
- snd_hda_input_jack_report(codec, tag);
+ hdmi_present_sense(codec, pin_nid, &spec->sink_eld[index]);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -903,13 +877,33 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, hda_nid_t pin_nid)
static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
struct hdmi_eld *eld)
{
+ /*
+ * Always execute a GetPinSense verb here, even when called from
+ * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
+ * response's PD bit is not the real PD value, but indicates that
+ * the real PD value changed. An older version of the HD-audio
+ * specification worked this way. Hence, we just ignore the data in
+ * the unsolicited response to avoid custom WARs.
+ */
int present = snd_hda_pin_sense(codec, pin_nid);
+ memset(eld, 0, sizeof(*eld));
+
eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
- eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
+ if (eld->monitor_present)
+ eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
+ else
+ eld->eld_valid = 0;
- if (present & AC_PINSENSE_ELDV)
- hdmi_get_show_eld(codec, pin_nid, eld);
+ printk(KERN_INFO
+ "HDMI status: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
+ pin_nid, eld->monitor_present, eld->eld_valid);
+
+ if (eld->eld_valid)
+ if (!snd_hdmi_get_eld(eld, codec, pin_nid))
+ snd_hdmi_show_eld(eld);
+
+ snd_hda_input_jack_report(codec, pin_nid);
}
static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -927,7 +921,6 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
SND_JACK_VIDEOOUT, NULL);
if (err < 0)
return err;
- snd_hda_input_jack_report(codec, pin_nid);
hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]);
@@ -1034,6 +1027,7 @@ static char *generic_hdmi_pcm_names[MAX_HDMI_CVTS] = {
"HDMI 0",
"HDMI 1",
"HDMI 2",
+ "HDMI 3",
};
/*
@@ -1490,18 +1484,6 @@ static const struct hda_codec_ops nvhdmi_patch_ops_2ch = {
.free = generic_hdmi_free,
};
-static int patch_nvhdmi_8ch_89(struct hda_codec *codec)
-{
- struct hdmi_spec *spec;
- int err = patch_generic_hdmi(codec);
-
- if (err < 0)
- return err;
- spec = codec->spec;
- spec->old_pin_detect = 1;
- return 0;
-}
-
static int patch_nvhdmi_2ch(struct hda_codec *codec)
{
struct hdmi_spec *spec;
@@ -1515,7 +1497,6 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
spec->multiout.num_dacs = 0; /* no analog */
spec->multiout.max_channels = 2;
spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
- spec->old_pin_detect = 1;
spec->num_cvts = 1;
spec->cvt[0] = nvhdmi_master_con_nid_7x;
spec->pcm_playback = &nvhdmi_pcm_playback_2ch;
@@ -1658,28 +1639,28 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
{ .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
{ .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x },
-{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_generic_hdmi },
/* 17 is known to be absent */
-{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
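
With old_pin_detect and hdmi_eld_valid()/hdmi_get_show_eld() removed, both the unsolicited-event handler and hdmi_add_pin() now funnel through hdmi_present_sense(), which re-reads the pin sense instead of trusting the PD/ELDV bits carried in the event, clears the cached ELD first, and only accepts ELD validity while a monitor is actually present. A hedged sketch of that decision (the bit positions below are placeholders, not the real AC_PINSENSE_* values; see hda_codec.h for those):

    #include <stdio.h>

    #define PINSENSE_PRESENCE (1u << 31)   /* placeholder for AC_PINSENSE_PRESENCE */
    #define PINSENSE_ELDV     (1u << 30)   /* placeholder for AC_PINSENSE_ELDV     */

    struct eld_state { int monitor_present; int eld_valid; };

    static void present_sense(unsigned int pin_sense, struct eld_state *eld)
    {
        eld->monitor_present = !!(pin_sense & PINSENSE_PRESENCE);
        /* ELD only counts while something is plugged in, as in the hunk above */
        eld->eld_valid = eld->monitor_present && (pin_sense & PINSENSE_ELDV);
    }

    int main(void)
    {
        struct eld_state e;

        present_sense(PINSENSE_ELDV, &e);  /* stale ELDV with nothing connected */
        printf("present=%d eld_valid=%d\n", e.monitor_present, e.eld_valid);
        return 0;
    }
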
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 28afbbf69ce0..95572d290c27 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -146,7 +146,7 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
"at91sam9g20ek_wm8731 "
": at91sam9g20ek_wm8731_init() called\n");
- ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_MCLK,
MCLK_RATE, SND_SOC_CLOCK_IN);
if (ret < 0) {
printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret);
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index 14d0716bf009..bcc208967917 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -22,7 +22,7 @@ SND_SOC_DAPM_ADC("ADC", "wm1250-ev1 Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_INPUT("WM1250 Input"),
-SND_SOC_DAPM_INPUT("WM1250 Output"),
+SND_SOC_DAPM_OUTPUT("WM1250 Output"),
};
static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = {
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 6dec7cee2cb4..2dc964b55e4f 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -198,7 +198,7 @@ static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
{
struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec);
- return wm8731->sysclk_type == WM8731_SYSCLK_MCLK;
+ return wm8731->sysclk_type == WM8731_SYSCLK_XTAL;
}
static const struct snd_soc_dapm_route wm8731_intercon[] = {
diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c
index ccc9bd832794..a0b1a7278284 100644
--- a/sound/soc/codecs/wm8915.c
+++ b/sound/soc/codecs/wm8915.c
@@ -19,7 +19,6 @@
#include <linux/gcd.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 2afabaf59491..1a591f1ebfbd 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = {
.hw_params = raumfeld_cs4270_hw_params,
};
-static int raumfeld_line_suspend(struct snd_soc_card *card)
+static int raumfeld_analog_suspend(struct snd_soc_card *card)
{
raumfeld_enable_audio(false);
return 0;
}
-static int raumfeld_line_resume(struct snd_soc_card *card)
+static int raumfeld_analog_resume(struct snd_soc_card *card)
{
raumfeld_enable_audio(true);
return 0;
@@ -225,32 +225,53 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
.hw_params = raumfeld_ak4104_hw_params,
};
-static struct snd_soc_dai_link raumfeld_dai[] = {
+#define DAI_LINK_CS4270 \
+{ \
+ .name = "CS4270", \
+ .stream_name = "CS4270", \
+ .cpu_dai_name = "pxa-ssp-dai.0", \
+ .platform_name = "pxa-pcm-audio", \
+ .codec_dai_name = "cs4270-hifi", \
+ .codec_name = "cs4270-codec.0-0048", \
+ .ops = &raumfeld_cs4270_ops, \
+}
+
+#define DAI_LINK_AK4104 \
+{ \
+ .name = "ak4104", \
+ .stream_name = "Playback", \
+ .cpu_dai_name = "pxa-ssp-dai.1", \
+ .codec_dai_name = "ak4104-hifi", \
+ .platform_name = "pxa-pcm-audio", \
+ .ops = &raumfeld_ak4104_ops, \
+ .codec_name = "spi0.0", \
+}
+
+static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] =
{
- .name = "ak4104",
- .stream_name = "Playback",
- .cpu_dai_name = "pxa-ssp-dai.1",
- .codec_dai_name = "ak4104-hifi",
- .platform_name = "pxa-pcm-audio",
- .ops = &raumfeld_ak4104_ops,
- .codec_name = "ak4104-codec.0",
-},
+ DAI_LINK_CS4270,
+ DAI_LINK_AK4104,
+};
+
+static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] =
{
- .name = "CS4270",
- .stream_name = "CS4270",
- .cpu_dai_name = "pxa-ssp-dai.0",
- .platform_name = "pxa-pcm-audio",
- .codec_dai_name = "cs4270-hifi",
- .codec_name = "cs4270-codec.0-0048",
- .ops = &raumfeld_cs4270_ops,
-},};
-
-static struct snd_soc_card snd_soc_raumfeld = {
- .name = "Raumfeld",
- .dai_link = raumfeld_dai,
- .suspend_post = raumfeld_line_suspend,
- .resume_pre = raumfeld_line_resume,
- .num_links = ARRAY_SIZE(raumfeld_dai),
+ DAI_LINK_CS4270,
+};
+
+static struct snd_soc_card snd_soc_raumfeld_connector = {
+ .name = "Raumfeld Connector",
+ .dai_link = snd_soc_raumfeld_connector_dai,
+ .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai),
+ .suspend_post = raumfeld_analog_suspend,
+ .resume_pre = raumfeld_analog_resume,
+};
+
+static struct snd_soc_card snd_soc_raumfeld_speaker = {
+ .name = "Raumfeld Speaker",
+ .dai_link = snd_soc_raumfeld_speaker_dai,
+ .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai),
+ .suspend_post = raumfeld_analog_suspend,
+ .resume_pre = raumfeld_analog_resume,
};
static struct platform_device *raumfeld_audio_device;
@@ -271,22 +292,25 @@ static int __init raumfeld_audio_init(void)
set_max9485_clk(MAX9485_MCLK_FREQ_122880);
- /* Register LINE and SPDIF */
+ /* Register analog device */
raumfeld_audio_device = platform_device_alloc("soc-audio", 0);
if (!raumfeld_audio_device)
return -ENOMEM;
- platform_set_drvdata(raumfeld_audio_device,
- &snd_soc_raumfeld);
- ret = platform_device_add(raumfeld_audio_device);
-
- /* no S/PDIF on Speakers */
if (machine_is_raumfeld_speaker())
+ platform_set_drvdata(raumfeld_audio_device,
+ &snd_soc_raumfeld_speaker);
+
+ if (machine_is_raumfeld_connector())
+ platform_set_drvdata(raumfeld_audio_device,
+ &snd_soc_raumfeld_connector);
+
+ ret = platform_device_add(raumfeld_audio_device);
+ if (ret < 0)
return ret;
raumfeld_enable_audio(true);
-
- return ret;
+ return 0;
}
static void __exit raumfeld_audio_exit(void)
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 459566bfcd35..d155cbb58e1c 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,6 +1,6 @@
config SND_SOC_SAMSUNG
tristate "ASoC support for Samsung"
- depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_EXYNOS4
+ depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_EXYNOS4
select S3C64XX_DMA if ARCH_S3C64XX
select S3C2410_DMA if ARCH_S3C2410
help
@@ -55,7 +55,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750
config SND_SOC_SAMSUNG_SMDK_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK"
- depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDK6442 || MACH_SMDKV210 || MACH_SMDKC110)
+ depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDKV210 || MACH_SMDKC110)
select SND_SOC_WM8580
select SND_SAMSUNG_I2S
help
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index 8aacf23d6f3a..3d26f6607aa4 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -249,7 +249,7 @@ static int __init smdk_audio_init(void)
int ret;
char *str;
- if (machine_is_smdkc100() || machine_is_smdk6442()
+ if (machine_is_smdkc100()
|| machine_is_smdkv210() || machine_is_smdkc110()) {
smdk.num_links = 3;
/* Secondary is at offset SAMSUNG_I2S_SECOFF from Primary */
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index bb7cd5812945..d75043ed7fc0 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1306,10 +1306,6 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
/* no, then find CPU DAI from registered DAIs*/
list_for_each_entry(cpu_dai, &dai_list, list) {
if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) {
-
- if (!try_module_get(cpu_dai->dev->driver->owner))
- return -ENODEV;
-
rtd->cpu_dai = cpu_dai;
goto find_codec;
}
@@ -1622,11 +1618,15 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num)
/* probe the cpu_dai */
if (!cpu_dai->probed) {
+ if (!try_module_get(cpu_dai->dev->driver->owner))
+ return -ENODEV;
+
if (cpu_dai->driver->probe) {
ret = cpu_dai->driver->probe(cpu_dai);
if (ret < 0) {
printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n",
cpu_dai->name);
+ module_put(cpu_dai->dev->driver->owner);
return ret;
}
}
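
The soc-core hunks move try_module_get() on the CPU DAI from binding time to probe time and add the missing module_put() when the driver's probe callback fails, so an aborted card build no longer leaves the DAI module's refcount elevated. The invariant is the usual get/put pairing, sketched here with a plain counter standing in for the module refcount:

    #include <stdio.h>

    /* Userspace stand-ins for try_module_get()/module_put(): just a counter. */
    static int refcount;
    static int try_get(void) { refcount++; return 1; }
    static void put(void)    { refcount--; }

    static int probe_dai(int should_fail)
    {
        if (!try_get())
            return -1;

        if (should_fail) {   /* driver->probe() failed */
            put();           /* the added module_put() keeps the count balanced */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        probe_dai(1);
        printf("refcount after failed probe: %d\n", refcount);  /* 0, not 1 */
        return 0;
    }
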
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 456617e63789..999bb08cdfb1 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1110,7 +1110,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
trace_snd_soc_dapm_start(card);
list_for_each_entry(d, &card->dapm_list, list)
- if (d->n_widgets)
+ if (d->n_widgets || d->codec == NULL)
d->dev_power = 0;
/* Check which widgets we need to power and store them in
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a90662af2d6b..220c6167dd86 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -48,6 +48,7 @@
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
+#include <sound/control.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/pcm.h>
@@ -492,14 +493,6 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
}
}
- chip->txfr_quirk = 0;
- err = 1; /* continue */
- if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
- /* need some special handlings */
- if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
- goto __error;
- }
-
/*
* For devices with more than one control interface, we assume the
* first contains the audio controls. We might need a more specific
@@ -508,6 +501,14 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
if (!chip->ctrl_intf)
chip->ctrl_intf = alts;
+ chip->txfr_quirk = 0;
+ err = 1; /* continue */
+ if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
+ /* need some special handlings */
+ if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
+ goto __error;
+ }
+
if (err > 0) {
/* create normal USB audio interfaces */
if (snd_usb_create_streams(chip, ifnum) < 0 ||
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index eab06edcc9b7..c22fa76e363a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -86,16 +86,6 @@ struct mixer_build {
const struct usbmix_selector_map *selector_map;
};
-enum {
- USB_MIXER_BOOLEAN,
- USB_MIXER_INV_BOOLEAN,
- USB_MIXER_S8,
- USB_MIXER_U8,
- USB_MIXER_S16,
- USB_MIXER_U16,
-};
-
-
/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
enum {
USB_XU_CLOCK_RATE = 0xe301,
@@ -535,20 +525,21 @@ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_ou
* if failed, give up and free the control instance.
*/
-static int add_control_to_empty(struct mixer_build *state, struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+ struct snd_kcontrol *kctl)
{
struct usb_mixer_elem_info *cval = kctl->private_data;
int err;
- while (snd_ctl_find_id(state->chip->card, &kctl->id))
+ while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
kctl->id.index++;
- if ((err = snd_ctl_add(state->chip->card, kctl)) < 0) {
+ if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) {
snd_printd(KERN_ERR "cannot add control (err = %d)\n", err);
return err;
}
cval->elem_id = &kctl->id;
- cval->next_id_elem = state->mixer->id_elems[cval->id];
- state->mixer->id_elems[cval->id] = cval;
+ cval->next_id_elem = mixer->id_elems[cval->id];
+ mixer->id_elems[cval->id] = cval;
return 0;
}
@@ -984,6 +975,9 @@ static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
.put = NULL,
};
+/* This symbol is exported in order to allow the mixer quirks to
+ * hook up to the standard feature unit control mechanism */
+struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;
/*
* build a feature control
@@ -1176,7 +1170,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res);
- add_control_to_empty(state, kctl);
+ snd_usb_mixer_add_control(state->mixer, kctl);
}
@@ -1340,7 +1334,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state,
snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
- add_control_to_empty(state, kctl);
+ snd_usb_mixer_add_control(state->mixer, kctl);
}
@@ -1641,7 +1635,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw
snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
- if ((err = add_control_to_empty(state, kctl)) < 0)
+ if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
return err;
}
return 0;
@@ -1858,7 +1852,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void
snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
cval->id, kctl->id.name, desc->bNrInPins);
- if ((err = add_control_to_empty(state, kctl)) < 0)
+ if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
return err;
return 0;
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index b4a2c8165e4b..ae1a14dcfe82 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -24,7 +24,16 @@ struct usb_mixer_interface {
u8 xonar_u1_status;
};
-#define MAX_CHANNELS 10 /* max logical channels */
+#define MAX_CHANNELS 16 /* max logical channels */
+
+enum {
+ USB_MIXER_BOOLEAN,
+ USB_MIXER_INV_BOOLEAN,
+ USB_MIXER_S8,
+ USB_MIXER_U8,
+ USB_MIXER_S16,
+ USB_MIXER_U16,
+};
struct usb_mixer_elem_info {
struct usb_mixer_interface *mixer;
@@ -55,4 +64,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer);
int snd_usb_mixer_activate(struct usb_mixer_interface *mixer);
+int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+ struct snd_kcontrol *kctl);
+
#endif /* __USBMIXER_H */
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 9146cffa6ede..3d0f4873112b 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -40,6 +40,8 @@
#include "mixer_quirks.h"
#include "helper.h"
+extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
+
/*
* Sound Blaster remote control configuration
*
@@ -492,6 +494,69 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
return err;
}
+/* M-Audio FastTrack Ultra quirks */
+
+/* private_free callback */
+static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
+{
+ kfree(kctl->private_data);
+ kctl->private_data = NULL;
+}
+
+static int snd_maudio_ftu_create_ctl(struct usb_mixer_interface *mixer,
+ int in, int out, const char *name)
+{
+ struct usb_mixer_elem_info *cval;
+ struct snd_kcontrol *kctl;
+
+ cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+ if (!cval)
+ return -ENOMEM;
+
+ cval->id = 5;
+ cval->mixer = mixer;
+ cval->val_type = USB_MIXER_S16;
+ cval->channels = 1;
+ cval->control = out + 1;
+ cval->cmask = 1 << in;
+
+ kctl = snd_ctl_new1(snd_usb_feature_unit_ctl, cval);
+ if (!kctl) {
+ kfree(cval);
+ return -ENOMEM;
+ }
+
+ snprintf(kctl->id.name, sizeof(kctl->id.name), name);
+ kctl->private_free = usb_mixer_elem_free;
+ return snd_usb_mixer_add_control(mixer, kctl);
+}
+
+static int snd_maudio_ftu_create_mixer(struct usb_mixer_interface *mixer)
+{
+ char name[64];
+ int in, out, err;
+
+ for (out = 0; out < 8; out++) {
+ for (in = 0; in < 8; in++) {
+ snprintf(name, sizeof(name),
+ "AIn%d - Out%d Capture Volume", in + 1, out + 1);
+ err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
+ if (err < 0)
+ return err;
+ }
+
+ for (in = 8; in < 16; in++) {
+ snprintf(name, sizeof(name),
+ "DIn%d - Out%d Playback Volume", in - 7, out + 1);
+ err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
unsigned char samplerate_id)
{
@@ -533,6 +598,11 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
snd_audigy2nx_proc_read);
break;
+ case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
+ case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
+ err = snd_maudio_ftu_create_mixer(mixer);
+ break;
+
case USB_ID(0x0b05, 0x1739):
case USB_ID(0x0b05, 0x1743):
err = snd_xonar_u1_controls_create(mixer);
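
The new M-Audio Fast Track Ultra support builds its monitor matrix out of ordinary feature-unit controls: for each of 8 outputs it creates one capture-volume control per analog input (AIn1-8) and one playback-volume control per digital return (DIn1-8), with cval->control selecting the output (1-based), cval->cmask selecting the input bit, and unit id 5 hard-coded; each control is registered through the newly exported snd_usb_mixer_add_control(). One nit: snd_maudio_ftu_create_ctl() passes name to snprintf() as the format string, which is harmless only because the generated names contain no '%'; "%s", name would be the more defensive spelling. A tiny sketch of how the grid maps to control/cmask:

    #include <stdio.h>

    int main(void)
    {
        int in, out;

        /* Mirrors snd_maudio_ftu_create_mixer(): the output picks .control
         * (1-based), the input picks the channel-mask bit; unit id 5 is
         * taken from the hunk above. Only a 2x2 corner is printed here. */
        for (out = 0; out < 2; out++)
            for (in = 0; in < 2; in++)
                printf("AIn%d - Out%d: control=%d cmask=0x%x (unit 5)\n",
                       in + 1, out + 1, out + 1, 1 << in);
        return 0;
    }
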
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 78792a8900c3..0b2ae8e1c02d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1988,7 +1988,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.data = & (const struct snd_usb_audio_quirk[]) {
{
.ifnum = 0,
- .type = QUIRK_IGNORE_INTERFACE
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
},
{
.ifnum = 1,
@@ -2055,7 +2055,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.data = & (const struct snd_usb_audio_quirk[]) {
{
.ifnum = 0,
- .type = QUIRK_IGNORE_INTERFACE
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
},
{
.ifnum = 1,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index bd13d7257240..2e969cbb393b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -19,6 +19,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
+#include <sound/control.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/pcm.h>
@@ -263,6 +264,20 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
}
/*
+ * Create a standard mixer for the specified interface.
+ */
+static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ if (quirk->ifnum < 0)
+ return 0;
+
+ return snd_usb_create_mixer(chip, quirk->ifnum, 0);
+}
+
+/*
* audio-interface quirks
*
* returns zero if no standard audio/MIDI parsing is needed.
@@ -294,7 +309,8 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
- [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk
+ [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
+ [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
};
if (quirk->type < QUIRK_TYPE_COUNT) {
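
create_standard_mixer_quirk() is wired into the same handler table the other quirk types use: quirk->type indexes an array of function pointers sized by QUIRK_TYPE_COUNT, so adding a type means one new enum value plus one designated initializer. The general shape of that dispatch, reduced to a stand-alone sketch with made-up handler names:

	#include <stdio.h>

	enum quirk_type { QUIRK_A, QUIRK_B, QUIRK_TYPE_COUNT };

	typedef int (*quirk_func_t)(int arg);

	static int handle_a(int arg) { return printf("A: %d\n", arg); }
	static int handle_b(int arg) { return printf("B: %d\n", arg); }

	static int dispatch(enum quirk_type type, int arg)
	{
		/* designated initializers keep the table in sync with the enum */
		static const quirk_func_t quirk_funcs[QUIRK_TYPE_COUNT] = {
			[QUIRK_A] = handle_a,
			[QUIRK_B] = handle_b,
		};

		if (type < QUIRK_TYPE_COUNT && quirk_funcs[type])
			return quirk_funcs[type](arg);
		return -1;	/* unknown or unhandled type */
	}

	int main(void)
	{
		return dispatch(QUIRK_B, 42) < 0;
	}
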
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 32f2a97f2f14..1e79986b5777 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -84,6 +84,7 @@ enum quirk_type {
QUIRK_AUDIO_FIXED_ENDPOINT,
QUIRK_AUDIO_EDIROL_UAXX,
QUIRK_AUDIO_ALIGN_TRANSFER,
+ QUIRK_AUDIO_STANDARD_MIXER,
QUIRK_TYPE_COUNT
};
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1455413ec7a7..032ba6398a5c 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -215,11 +215,13 @@ LIB_FILE=$(OUTPUT)libperf.a
LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/linux/const.h
LIB_H += ../../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += util/include/linux/bitops.h
LIB_H += util/include/linux/compiler.h
+LIB_H += util/include/linux/const.h
LIB_H += util/include/linux/ctype.h
LIB_H += util/include/linux/kernel.h
LIB_H += util/include/linux/list.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e18eb7ed30ae..7b139e1e7e86 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -8,8 +8,6 @@
#include "builtin.h"
#include "util/util.h"
-
-#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0974f957b8fa..8e2c85798185 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -823,6 +823,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
symbol__init();
+ if (symbol_conf.kptr_restrict)
+ pr_warning(
+"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
+"check /proc/sys/kernel/kptr_restrict.\n\n"
+"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
+"file is not found in the buildid cache or in the vmlinux path.\n\n"
+"Samples in kernel modules won't be resolved at all.\n\n"
+"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
+"even with a suitable vmlinux or kallsyms file.\n\n");
+
if (no_buildid_cache || no_buildid)
disable_buildid_cache();
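
The message fires because a restricted /proc/kallsyms exports every symbol at address 0, so perf can record neither a reference relocation symbol nor module start addresses. The effect is easy to confirm from user space (a stand-alone sketch, not part of the patch):

	#include <stdio.h>

	/* Read the first kallsyms entry; under kptr_restrict an
	 * unprivileged reader sees it at address 0. */
	int main(void)
	{
		FILE *fp = fopen("/proc/kallsyms", "r");
		unsigned long long addr;
		char type, sym[256];

		if (!fp)
			return 1;
		if (fscanf(fp, "%llx %c %255s", &addr, &type, sym) == 3)
			printf("%s is at %#llx%s\n", sym, addr,
			       addr ? "" : " (restricted)");
		fclose(fp);
		return 0;
	}
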
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 498c6f70a747..287a173523a7 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -116,6 +116,9 @@ static int process_sample_event(union perf_event *event,
if (al.filtered || (hide_unresolved && al.sym == NULL))
return 0;
+ if (al.map != NULL)
+ al.map->dso->hit = 1;
+
if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
pr_debug("problem incrementing symbol period, skipping event\n");
return -1;
@@ -249,6 +252,8 @@ static int __cmd_report(void)
u64 nr_samples;
struct perf_session *session;
struct perf_evsel *pos;
+ struct map *kernel_map;
+ struct kmap *kernel_kmap;
const char *help = "For a higher level overview, try: perf report --sort comm,dso";
signal(SIGINT, sig_handler);
@@ -268,6 +273,24 @@ static int __cmd_report(void)
if (ret)
goto out_delete;
+ kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION];
+ kernel_kmap = map__kmap(kernel_map);
+ if (kernel_map == NULL ||
+ (kernel_map->dso->hit &&
+ (kernel_kmap->ref_reloc_sym == NULL ||
+ kernel_kmap->ref_reloc_sym->addr == 0))) {
+ const struct dso *kdso = kernel_map->dso;
+
+ ui__warning(
+"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
+"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
+"Samples in kernel modules can't be resolved as well.\n\n",
+ RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ?
+"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
+"can't be resolved." :
+"If some relocation was applied (e.g. kexec) symbols may be misresolved.");
+ }
+
if (dump_trace) {
perf_session__fprintf_nr_events(session, stdout);
goto out_delete;
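
The new warning means "kernel samples were recorded, but the data file carries no usable reference relocation symbol", which is exactly what a kptr_restrict'ed record run produces. Restated as a stand-alone predicate with simplified types (a sketch, not the perf data structures):

	#include <stdio.h>

	struct ref_reloc_sym { unsigned long long addr; };

	static int kernel_map_unreliable(int have_map, int dso_hit,
					 const struct ref_reloc_sym *ref)
	{
		if (!have_map)
			return 1;	/* no kernel map recorded at all */
		/* kernel was hit, but there is no non-zero relocation anchor */
		return dso_hit && (ref == NULL || ref->addr == 0);
	}

	int main(void)
	{
		struct ref_reloc_sym zeroed = { .addr = 0 };

		printf("%d %d %d\n",
		       kernel_map_unreliable(0, 0, NULL),	/* 1 */
		       kernel_map_unreliable(1, 1, &zeroed),	/* 1 */
		       kernel_map_unreliable(1, 0, &zeroed));	/* 0 */
		return 0;
	}
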
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 974f6d3f4e53..22747de7234b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,7 +10,6 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
-#include "util/parse-options.h"
#include "util/util.h"
#include "util/evlist.h"
#include "util/evsel.h"
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 2d7934e9de38..f2f3f4937aa2 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -62,8 +62,6 @@
#include <linux/unistd.h>
#include <linux/types.h>
-#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-
static struct perf_top top = {
.count_filter = 5,
.delay_secs = 2,
@@ -82,6 +80,8 @@ static bool use_tui, use_stdio;
static int default_interval = 0;
+static bool kptr_restrict_warned;
+static bool vmlinux_warned;
static bool inherit = false;
static int realtime_prio = 0;
static bool group = false;
@@ -740,7 +740,22 @@ static void perf_event__process_sample(const union perf_event *event,
al.filtered)
return;
+ if (!kptr_restrict_warned &&
+ symbol_conf.kptr_restrict &&
+ al.cpumode == PERF_RECORD_MISC_KERNEL) {
+ ui__warning(
+"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
+"Check /proc/sys/kernel/kptr_restrict.\n\n"
+"Kernel%s samples will not be resolved.\n",
+ !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
+ " modules" : "");
+ if (use_browser <= 0)
+ sleep(5);
+ kptr_restrict_warned = true;
+ }
+
if (al.sym == NULL) {
+ const char *msg = "Kernel samples will not be resolved.\n";
/*
* As we do lazy loading of symtabs we only will know if the
* specified vmlinux file is invalid when we actually have a
@@ -752,12 +767,20 @@ static void perf_event__process_sample(const union perf_event *event,
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
- if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
+ if (!kptr_restrict_warned && !vmlinux_warned &&
+ al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
- ui__warning("The %s file can't be used\n",
- symbol_conf.vmlinux_name);
- exit_browser(0);
- exit(1);
+ if (symbol_conf.vmlinux_name) {
+ ui__warning("The %s file can't be used.\n%s",
+ symbol_conf.vmlinux_name, msg);
+ } else {
+ ui__warning("A vmlinux file was not found.\n%s",
+ msg);
+ }
+
+ if (use_browser <= 0)
+ sleep(5);
+ vmlinux_warned = true;
}
return;
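
Both warnings in the top loop are guarded by one-shot flags (kptr_restrict_warned, vmlinux_warned) so the message is printed once rather than on every sample reaching perf_event__process_sample(). The latch pattern on its own (a sketch):

	#include <stdio.h>
	#include <stdbool.h>

	static bool kptr_restrict_warned;

	static void maybe_warn(bool restricted_sample)
	{
		if (kptr_restrict_warned || !restricted_sample)
			return;
		fprintf(stderr, "kernel address maps are restricted\n");
		kptr_restrict_warned = true;	/* latch: warn only once */
	}

	int main(void)
	{
		maybe_warn(true);	/* prints */
		maybe_warn(true);	/* silent, already latched */
		return 0;
	}
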
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 6635fcd11ca5..0fe9adf76379 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -553,9 +553,18 @@ static int perf_event__process_kernel_mmap(union perf_event *event,
goto out_problem;
perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
- perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
- symbol_name,
- event->mmap.pgoff);
+
+ /*
+ * Avoid using a zero address (kptr_restrict) for the ref reloc
+ * symbol. Effectively having zero here means that at record
+ * time /proc/sys/kernel/kptr_restrict was non-zero.
+ */
+ if (event->mmap.pgoff != 0) {
+ perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ event->mmap.pgoff);
+ }
+
if (machine__is_default_guest(machine)) {
/*
* preload dso of guest kernel and modules
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ee0fe0dffa71..cca29ededb5b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -35,7 +35,17 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
+ int cpu, thread;
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+
+ if (evsel->fd) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ FD(evsel, cpu, thread) = -1;
+ }
+ }
+ }
+
return evsel->fd != NULL ? 0 : -ENOMEM;
}
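
Pre-filling the xyarray with -1 lets close and error paths tell "never opened" apart from a real descriptor, since 0 is a valid fd. The idea behind the per-cpu, per-thread fd matrix, rebuilt as a stand-alone sketch (not the perf xyarray implementation):

	#include <stdlib.h>
	#include <unistd.h>

	struct fd_matrix { int ncpus, nthreads, *fd; };

	#define FD(m, cpu, thread) ((m)->fd[(cpu) * (m)->nthreads + (thread)])

	static int fd_matrix_init(struct fd_matrix *m, int ncpus, int nthreads)
	{
		int cpu, thread;

		m->ncpus = ncpus;
		m->nthreads = nthreads;
		m->fd = calloc(ncpus * nthreads, sizeof(int));
		if (!m->fd)
			return -1;

		/* -1 marks "never opened"; fd 0 would be perfectly valid */
		for (cpu = 0; cpu < ncpus; cpu++)
			for (thread = 0; thread < nthreads; thread++)
				FD(m, cpu, thread) = -1;
		return 0;
	}

	static void fd_matrix_close(struct fd_matrix *m)
	{
		int cpu, thread;

		for (cpu = 0; cpu < m->ncpus; cpu++)
			for (thread = 0; thread < m->nthreads; thread++)
				if (FD(m, cpu, thread) >= 0)
					close(FD(m, cpu, thread));
		free(m->fd);
	}

	int main(void)
	{
		struct fd_matrix m;

		if (fd_matrix_init(&m, 2, 4))
			return 1;
		/* ... open events and store the real fds via FD() ... */
		fd_matrix_close(&m);
		return 0;
	}
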
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 0717bebc7649..afb0849fe530 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -193,9 +193,13 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
*linkname = malloc(size), *targetname;
int len, err = -1;
- if (is_kallsyms)
+ if (is_kallsyms) {
+ if (symbol_conf.kptr_restrict) {
+ pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+ return 0;
+ }
realname = (char *)name;
- else
+ } else
realname = realpath(name, NULL);
if (realname == NULL || filename == NULL || linkname == NULL)
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h
new file mode 100644
index 000000000000..1b476c9ae649
--- /dev/null
+++ b/tools/perf/util/include/linux/const.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/const.h"
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 516876dfbe52..eec196329fd9 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -676,9 +676,30 @@ discard_symbol: rb_erase(&pos->rb_node, root);
return count + moved;
}
+static bool symbol__restricted_filename(const char *filename,
+ const char *restricted_filename)
+{
+ bool restricted = false;
+
+ if (symbol_conf.kptr_restrict) {
+ char *r = realpath(filename, NULL);
+
+ if (r != NULL) {
+ restricted = strcmp(r, restricted_filename) == 0;
+ free(r);
+ return restricted;
+ }
+ }
+
+ return restricted;
+}
+
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, symbol_filter_t filter)
{
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return -1;
+
if (dso__load_all_kallsyms(dso, filename, map) < 0)
return -1;
@@ -1790,6 +1811,9 @@ static int machine__create_modules(struct machine *machine)
modules = path;
}
+ if (symbol__restricted_filename(path, "/proc/modules"))
+ return -1;
+
file = fopen(modules, "r");
if (file == NULL)
return -1;
@@ -2239,6 +2263,9 @@ static u64 machine__get_kernel_start_addr(struct machine *machine)
}
}
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return 0;
+
if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
return 0;
@@ -2410,6 +2437,25 @@ static int setup_list(struct strlist **list, const char *list_str,
return 0;
}
+static bool symbol__read_kptr_restrict(void)
+{
+ bool value = false;
+
+ if (geteuid() != 0) {
+ FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+ if (fp != NULL) {
+ char line[8];
+
+ if (fgets(line, sizeof(line), fp) != NULL)
+ value = atoi(line) != 0;
+
+ fclose(fp);
+ }
+ }
+
+ return value;
+}
+
int symbol__init(void)
{
const char *symfs;
@@ -2456,6 +2502,8 @@ int symbol__init(void)
if (symfs != symbol_conf.symfs)
free((void *)symfs);
+ symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
+
symbol_conf.initialized = true;
return 0;
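
symbol__restricted_filename() compares the canonical path of whatever the user passed against the restricted proc file, so a symlink or relative path pointing at /proc/kallsyms is caught as well. The same realpath() comparison in isolation (a user-space sketch, not the perf code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int points_at(const char *filename, const char *target)
	{
		char *r = realpath(filename, NULL);	/* resolve symlinks, ./, .. */
		int same = 0;

		if (r) {
			same = strcmp(r, target) == 0;
			free(r);
		}
		return same;
	}

	int main(int argc, char **argv)
	{
		const char *f = argc > 1 ? argv[1] : "/proc/kallsyms";

		printf("%s %s /proc/kallsyms\n", f,
		       points_at(f, "/proc/kallsyms") ?
		       "points at" : "does not point at");
		return 0;
	}

Note that symbol__read_kptr_restrict() above skips the sysctl check entirely when geteuid() == 0, on the assumption that root can still read the kernel addresses.
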
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 242de0101a86..325ee36a9d29 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -75,7 +75,8 @@ struct symbol_conf {
use_callchain,
exclude_other,
show_cpu_utilization,
- initialized;
+ initialized,
+ kptr_restrict;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,